src/hotspot/share/code/codeCache.cpp

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 


  25 #include "code/codeBlob.hpp"
  26 #include "code/codeCache.hpp"
  27 #include "code/codeHeapState.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/dependencies.hpp"
  30 #include "code/dependencyContext.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "code/pcDesc.hpp"
  33 #include "compiler/compilationPolicy.hpp"
  34 #include "compiler/compileBroker.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "compiler/oopMap.hpp"
  37 #include "gc/shared/barrierSetNMethod.hpp"
  38 #include "gc/shared/classUnloadingContext.hpp"
  39 #include "gc/shared/collectedHeap.hpp"
  40 #include "jfr/jfrEvents.hpp"
  41 #include "jvm_io.h"
  42 #include "logging/log.hpp"
  43 #include "logging/logStream.hpp"
  44 #include "memory/allocation.inline.hpp"

 153       scopes_data_size += nm->scopes_data_size();
 154       scopes_pcs_size  += nm->scopes_pcs_size();
 155     } else {
 156       code_size        += cb->code_size();
 157     }
 158   }
 159 };
 160 
 161 // Iterate over all CodeHeaps
 162 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 163 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 164 
 165 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 166 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 167 
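Note on the macros above: FOR_ALL_HEAPS and FOR_ALL_ALLOCABLE_HEAPS bind heap to a GrowableArrayIterator, so loop bodies dereference it as (*heap). A stand-alone sketch of the equivalent expansion, with std::vector and a stub type standing in for GrowableArray and CodeHeap (illustration only, not HotSpot code):

#include <cstdio>
#include <vector>

struct HeapStub { const char* name; };

int main() {
  std::vector<HeapStub*> heaps = { new HeapStub{"non-nmethods"}, new HeapStub{"profiled nmethods"} };
  // Equivalent of FOR_ALL_HEAPS(heap): 'heap' is an iterator over pointers,
  // so the body reaches the heap object with (*heap), as the callers below do.
  for (std::vector<HeapStub*>::iterator heap = heaps.begin(); heap != heaps.end(); ++heap) {
    std::printf("-- %s --\n", (*heap)->name);
  }
  return 0;
}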
 168 address CodeCache::_low_bound = nullptr;
 169 address CodeCache::_high_bound = nullptr;
 170 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 171 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 172 


 173 // Initialize arrays of CodeHeap subsets
 174 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 175 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 176 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 177 
 178 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 179   if (size < required_size) {
 180     log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
 181                          codeheap, size/K, required_size/K);
 182     err_msg title("Not enough space in %s to run VM", codeheap);
 183     err_msg message("%zuK < %zuK", size/K, required_size/K);
 184     vm_exit_during_initialization(title, message);
 185   }
 186 }
 187 
 188 struct CodeHeapInfo {
 189   size_t size;
 190   bool set;
 191   bool enabled;
 192 };
 193 
 194 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 195   assert(!heap->set, "sanity");
 196   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 197 }
 198 
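The sizing rule in set_size_of_unset_code_heap gives the unset heap the whole remainder, clamped from below at min_size. A self-contained check of that arithmetic (illustrative numbers, not from any real configuration):

#include <cassert>
#include <cstddef>

// Mirrors set_size_of_unset_code_heap's arithmetic, for illustration only.
static size_t unset_heap_size(size_t available, size_t used, size_t min_size) {
  return (available > (used + min_size)) ? (available - used) : min_size;
}

int main() {
  const size_t M = 1024 * 1024;
  // Enough room left: the unset heap takes the whole remainder.
  assert(unset_heap_size(240 * M, 120 * M, 4 * M) == 120 * M);
  // Remainder would drop below min_size: clamp to min_size.
  assert(unset_heap_size(240 * M, 238 * M, 4 * M) == 4 * M);
  return 0;
}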
 199 void CodeCache::initialize_heaps() {
 200 
 201   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 202   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 203   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 204 
 205   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 206   const size_t ps             = page_size(false, 8);
 207   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 208   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 209   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 210 
 211   // Prerequisites
 212   if (!heap_available(CodeBlobType::MethodProfiled)) {
 213     // For compatibility reasons, disabled tiered compilation overrides
 214     // segment size even if it is set explicitly.
 215     non_profiled.size += profiled.size;
 216     // Profiled code heap is not available, forcibly set size to 0
 217     profiled.size = 0;
 218     profiled.set = true;
 219     profiled.enabled = false;
 220   }
 221 
 222   assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
 223 
 224   size_t compiler_buffer_size = 0;
 225   COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
 226   COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
 227 













 228   if (!non_nmethod.set) {
 229     non_nmethod.size += compiler_buffer_size;
 230   }
 231 
 232   if (!profiled.set && !non_profiled.set) {
 233     non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
 234                                         (cache_size - non_nmethod.size) / 2 : min_size;
 235   }
 236 
 237   if (profiled.set && !non_profiled.set) {
 238     set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
 239   }
 240 
 241   if (!profiled.set && non_profiled.set) {
 242     set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
 243   }
 244 
 245   // Compatibility.
 246   size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
 247   if (!non_nmethod.set && profiled.set && non_profiled.set) {

 333     message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);
 334 
 335     vm_exit_during_initialization("Invalid code heap sizes", message);
 336   }
 337 
 338   // Compatibility. Print warning if using large pages but not able to use the size given
 339   if (UseLargePages) {
 340     const size_t lg_ps = page_size(false, 1);
 341     if (ps < lg_ps) {
 342       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 343                              "Reverting to smaller page size (" PROPERFMT ").",
 344                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 345     }
 346   }
 347 
 348   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 349   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 350   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 351   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 352 






 353   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 354 
 355   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 356   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 357 
 358   size_t offset = 0;






 359   if (profiled.enabled) {
 360     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 361     offset += profiled.size;
 362     // Tier 2 and tier 3 (profiled) methods
 363     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 364   }
 365 
 366   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 367   offset += non_nmethod.size;
 368   // Non-nmethods (stubs, adapters, ...)
 369   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 370 
 371   if (non_profiled.enabled) {
 372     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 373     // Tier 1 and tier 4 (non-profiled) methods and native methods
 374     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 375   }
 376 }
 377 
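initialize_heaps lays the heaps out back to back in one reservation: profiled, then non-nmethod, then non-profiled. A sketch of the offset bookkeeping with plain integers (sizes are illustrative only):

#include <cassert>
#include <cstddef>

int main() {
  const size_t M = 1024 * 1024;
  size_t profiled_size = 117 * M, non_nmethod_size = 6 * M, non_profiled_size = 117 * M;

  // Mirrors the rs.partition(offset, size) calls above:
  size_t offset = 0;
  size_t profiled_start = offset;      offset += profiled_size;
  size_t non_nmethod_start = offset;   offset += non_nmethod_size;
  size_t non_profiled_start = offset;  offset += non_profiled_size;

  assert(profiled_start == 0);
  assert(non_nmethod_start == profiled_size);
  assert(non_profiled_start == profiled_size + non_nmethod_size);
  // The non-nmethod heap lands between the two method heaps, which is what
  // keeps max_distance_to_non_nmethod() small (see that function below).
  assert(offset == 240 * M);
  return 0;
}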








 378 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 379   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 380                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 381 }
 382 
 383 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 384   // Align and reserve space for code cache
 385   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 386   const size_t rs_size = align_up(size, rs_align);
 387 
 388   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 389   if (!rs.is_reserved()) {
 390     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
 391                                           rs_size/K));
 392   }
 393 
 394   // Initialize bounds
 395   _low_bound = (address)rs.base();
 396   _high_bound = _low_bound + rs.size();
 397   return rs;

1075 size_t CodeCache::max_capacity() {
1076   size_t max_cap = 0;
1077   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1078     max_cap += (*heap)->max_capacity();
1079   }
1080   return max_cap;
1081 }
1082 
1083 bool CodeCache::is_non_nmethod(address addr) {
1084   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1085   return blob->contains(addr);
1086 }
1087 
1088 size_t CodeCache::max_distance_to_non_nmethod() {
1089   if (!SegmentedCodeCache) {
1090     return ReservedCodeCacheSize;
1091   } else {
1092     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1093     // the max distance is minimized by placing the NonNMethod segment
1094     // in between MethodProfiled and MethodNonProfiled segments
1095     size_t dist1 = (size_t)blob->high() - (size_t)_low_bound;
1096     size_t dist2 = (size_t)_high_bound - (size_t)blob->low();
1097     return dist1 > dist2 ? dist1 : dist2;
1098   }
1099 }
1100 
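To make the distance computation above concrete: with the non-nmethod heap in the middle of the reservation, neither dist1 nor dist2 can approach the full cache size. A numeric sketch (bounds are illustrative):

#include <cassert>
#include <cstddef>

int main() {
  const size_t M = 1024 * 1024;
  // Illustrative bounds: cache spans [0, 240M), non-nmethod heap [117M, 123M).
  size_t low_bound = 0, high_bound = 240 * M;
  size_t nn_low = 117 * M, nn_high = 123 * M;

  size_t dist1 = nn_high - low_bound;  // farthest point below the heap
  size_t dist2 = high_bound - nn_low;  // farthest point above the heap
  size_t max_distance = dist1 > dist2 ? dist1 : dist2;

  // Roughly half of what an end placement would give (~240M).
  assert(max_distance == 123 * M);
  return 0;
}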
1101 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1102 // is free, reverse_free_ratio() returns 4.
1103 // Since the code heap for each type of code blob falls forward to the
1104 // next type of code heap, return the reverse free ratio for the entire
1105 // code cache.
1106 double CodeCache::reverse_free_ratio() {
1107   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1108   double max = (double)max_capacity();
1109   double result = max / unallocated;
1110   assert (max >= unallocated, "Must be");
1111   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1112   return result;
1113 }
1114 
1115 size_t CodeCache::bytes_allocated_in_freelists() {
1116   size_t allocated_bytes = 0;

1221                             AnyObj::RESOURCE_AREA, mtInternal,
1222                             &DependencySignature::hash,
1223                             &DependencySignature::equals> DepTable;
1224 
1225   DepTable* table = new DepTable();
1226 
1227   // Iterate over live nmethods and check dependencies of all nmethods that are not
1228   // marked for deoptimization. A particular dependency is only checked once.
1229   NMethodIterator iter(NMethodIterator::not_unloading);
1230   while(iter.next()) {
1231     nmethod* nm = iter.method();
1232     // Only notify for live nmethods
1233     if (!nm->is_marked_for_deoptimization()) {
1234       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1235         // Construct abstraction of a dependency.
1236         DependencySignature* current_sig = new DependencySignature(deps);
1237 
1238         // Determine if dependency is already checked. table->put(...) returns
1239         // 'true' if the dependency is added (i.e., was not in the hashtable).
1240         if (table->put(*current_sig, 1)) {
1241           if (deps.check_dependency() != nullptr) {

1242             // Dependency checking failed. Print out information about the failed
1243             // dependency and finally fail with an assert. We can fail here, since
1244             // dependency checking is never done in a product build.
1245             tty->print_cr("Failed dependency:");
1246             changes.print();
1247             nm->print();
1248             nm->print_dependencies_on(tty);
1249             assert(false, "Should have been marked for deoptimization");
1250           }
1251         }
1252       }
1253     }
1254   }
1255 }
1256 #endif
1257 
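The dependency de-duplication above hinges on table->put returning true only for a first insertion. The same pattern with std::set standing in for the resource-area hashtable (stub signatures, not the HotSpot types):

#include <cassert>
#include <set>
#include <string>

int main() {
  std::set<std::string> seen;  // plays the role of DepTable
  int checks = 0;
  const char* deps[] = { "A depends on B", "C depends on D", "A depends on B" };

  for (const char* sig : deps) {
    // insert(...).second is true only for the first occurrence, mirroring
    // table->put(*current_sig, 1) above.
    if (seen.insert(sig).second) {
      checks++;  // the expensive check_dependency() would run here
    }
  }
  assert(checks == 2);  // the duplicate dependency is checked only once
  return 0;
}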
1258 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1259   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1260 
1261   // search the hierarchy looking for nmethods which are affected by the loading of this class
1262 
1263   // then search the interfaces this class implements looking for nmethods
1264   // which might be dependent on the fact that an interface only had one
1265   // implementor.
1266   // nmethod::check_all_dependencies works correctly only if no safepoint
1267   // can happen.
1268   NoSafepointVerifier nsv;
1269   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1270     InstanceKlass* d = str.klass();







1271     d->mark_dependent_nmethods(deopt_scope, changes);
1272   }
1273 
1274 #ifndef PRODUCT
1275   if (VerifyDependencies) {
1276     // Object pointers are used as unique identifiers for dependency arguments. This
1277     // is only possible if no safepoint, i.e., no GC, occurs during the verification code.
1278     dependentCheckTime.start();
1279     check_live_nmethods_dependencies(changes);
1280     dependentCheckTime.stop();
1281   }
1282 #endif
1283 }
1284 
1285 #if INCLUDE_JVMTI
1286 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1287 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1288 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1289 
1290 static void add_to_old_table(nmethod* c) {

1509 }
1510 PRAGMA_DIAG_POP
1511 
1512 void CodeCache::print_memory_overhead() {
1513   size_t wasted_bytes = 0;
1514   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1515       CodeHeap* curr_heap = *heap;
1516       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1517         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1518         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1519       }
1520   }
1521   // Print bytes that are allocated in the freelist
1522   ttyLocker ttl;
1523   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1524   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1525   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1526   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1527 }
1528 
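The wasted_bytes sum in print_memory_overhead is the rounding loss of segment-granular allocation: each blob occupies whole segments. A short illustration (the segment size value is illustrative, not the real CodeCacheSegmentSize default):

#include <cassert>
#include <cstddef>

int main() {
  const size_t segment_size = 128;  // illustrative CodeCacheSegmentSize
  const size_t blob_size = 1000;    // bytes the CodeBlob actually needs
  size_t segments = (blob_size + segment_size - 1) / segment_size;  // round up
  size_t wasted = segments * segment_size - blob_size;
  assert(segments == 8 && wasted == 24);  // 1024 bytes held, 24 bytes slack
  return 0;
}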















































































1529 //------------------------------------------------------------------------------------------------
1530 // Non-product version
1531 
1532 #ifndef PRODUCT
1533 
1534 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1535   if (PrintCodeCache2) {  // Need to add a new flag
1536     ResourceMark rm;
1537     if (size == 0) {
1538       int s = cb->size();
1539       assert(s >= 0, "CodeBlob size is negative: %d", s);
1540       size = (uint) s;
1541     }
1542     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1543   }
1544 }
1545 
1546 void CodeCache::print_internals() {
1547   int nmethodCount = 0;
1548   int runtimeStubCount = 0;
1549   int upcallStubCount = 0;
1550   int adapterCount = 0;
1551   int mhAdapterCount = 0;
1552   int vtableBlobCount = 0;
1553   int deoptimizationStubCount = 0;
1554   int uncommonTrapStubCount = 0;
1555   int exceptionStubCount = 0;
1556   int safepointStubCount = 0;
1557   int bufferBlobCount = 0;
1558   int total = 0;
1559   int nmethodNotEntrant = 0;
1560   int nmethodJava = 0;
1561   int nmethodNative = 0;
1562   int max_nm_size = 0;
1563   ResourceMark rm;
1564 
1565   int i = 0;
1566   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1567     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1568       tty->print_cr("-- %s --", (*heap)->name());
1569     }
1570     FOR_ALL_BLOBS(cb, *heap) {
1571       total++;

1572       if (cb->is_nmethod()) {
1573         nmethod* nm = (nmethod*)cb;
1574 
1575         if (Verbose && nm->method() != nullptr) {
1576           ResourceMark rm;
1577           char *method_name = nm->method()->name_and_sig_as_C_string();
1578           tty->print("%s", method_name);
1579           if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1580         }
1581 
1582         nmethodCount++;
1583 
1584         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1585         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1586 
1587         if(nm->method() != nullptr && nm->is_java_method()) {
1588           nmethodJava++;
1589           max_nm_size = MAX2(max_nm_size, nm->size());
1590         }
1591       } else if (cb->is_runtime_stub()) {
1592         runtimeStubCount++;
1593       } else if (cb->is_upcall_stub()) {
1594         upcallStubCount++;
1595       } else if (cb->is_deoptimization_stub()) {
1596         deoptimizationStubCount++;
1597       } else if (cb->is_uncommon_trap_stub()) {
1598         uncommonTrapStubCount++;
1599       } else if (cb->is_exception_stub()) {
1600         exceptionStubCount++;

1757       FOR_ALL_BLOBS(cb, *heap) {
1758         number_of_blobs++;
1759         code_size += cb->code_size();
1760         ImmutableOopMapSet* set = cb->oop_maps();
1761         if (set != nullptr) {
1762           number_of_oop_maps += set->count();
1763           map_size           += set->nr_of_bytes();
1764         }
1765       }
1766     }
1767     tty->print_cr("OopMaps");
1768     tty->print_cr("  #blobs    = %d", number_of_blobs);
1769     tty->print_cr("  code size = %d", code_size);
1770     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1771     tty->print_cr("  map size  = %d", map_size);
1772   }
1773 
1774 #endif // !PRODUCT
1775 }
1776 



















1777 void CodeCache::print_summary(outputStream* st, bool detailed) {
1778   int full_count = 0;
1779   julong total_used = 0;
1780   julong total_max_used = 0;
1781   julong total_free = 0;
1782   julong total_size = 0;
1783   FOR_ALL_HEAPS(heap_iterator) {
1784     CodeHeap* heap = (*heap_iterator);
1785     size_t total = (heap->high_boundary() - heap->low_boundary());
1786     if (_heaps->length() >= 1) {
1787       st->print("%s:", heap->name());
1788     } else {
1789       st->print("CodeCache:");
1790     }
1791     size_t size = total/K;
1792     size_t used = (total - heap->unallocated_capacity())/K;
1793     size_t max_used = heap->max_allocated_capacity()/K;
1794     size_t free = heap->unallocated_capacity()/K;
1795     total_size += size;
1796     total_used += used;

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotCacheAccess.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/codeBlob.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/codeHeapState.hpp"
  30 #include "code/compiledIC.hpp"
  31 #include "code/dependencies.hpp"
  32 #include "code/dependencyContext.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "compiler/compilationPolicy.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/compilerDefinitions.inline.hpp"
  38 #include "compiler/oopMap.hpp"
  39 #include "gc/shared/barrierSetNMethod.hpp"
  40 #include "gc/shared/classUnloadingContext.hpp"
  41 #include "gc/shared/collectedHeap.hpp"
  42 #include "jfr/jfrEvents.hpp"
  43 #include "jvm_io.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "memory/allocation.inline.hpp"

 155       scopes_data_size += nm->scopes_data_size();
 156       scopes_pcs_size  += nm->scopes_pcs_size();
 157     } else {
 158       code_size        += cb->code_size();
 159     }
 160   }
 161 };
 162 
 163 // Iterate over all CodeHeaps
 164 #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
 165 #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
 166 
 167 // Iterate over all CodeBlobs (cb) on the given CodeHeap
 168 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))
 169 
 170 address CodeCache::_low_bound = nullptr;
 171 address CodeCache::_high_bound = nullptr;
 172 volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
 173 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;
 174 
 175 static ReservedSpace _cds_code_space;
 176 
 177 // Initialize arrays of CodeHeap subsets
 178 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 179 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 180 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 181 
 182 static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
 183   if (size < required_size) {
 184     log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
 185                          codeheap, size/K, required_size/K);
 186     err_msg title("Not enough space in %s to run VM", codeheap);
 187     err_msg message("%zuK < %zuK", size/K, required_size/K);
 188     vm_exit_during_initialization(title, message);
 189   }
 190 }
 191 
 192 struct CodeHeapInfo {
 193   size_t size;
 194   bool set;
 195   bool enabled;
 196 };
 197 
 198 static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
 199   assert(!heap->set, "sanity");
 200   heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
 201 }
 202 
 203 void CodeCache::initialize_heaps() {

 204   CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
 205   CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
 206   CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 207 
 208   const bool cache_size_set   = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
 209   const size_t ps             = page_size(false, 8);
 210   const size_t min_size       = MAX2(os::vm_allocation_granularity(), ps);
 211   const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
 212   size_t cache_size           = align_up(ReservedCodeCacheSize, min_size);
 213 
 214   // Prerequisites
 215   if (!heap_available(CodeBlobType::MethodProfiled)) {
 216     // For compatibility reasons, disabled tiered compilation overrides
 217     // segment size even if it is set explicitly.
 218     non_profiled.size += profiled.size;
 219     // Profiled code heap is not available, forcibly set size to 0
 220     profiled.size = 0;
 221     profiled.set = true;
 222     profiled.enabled = false;
 223   }
 224 
 225   assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
 226 
 227   size_t compiler_buffer_size = 0;
 228   COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
 229   COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
 230 
 231   // During the AOT assembly phase more compiler threads are used
 232   // and the C2 temp buffer is bigger.
 233   // But due to rounding issues the total code cache size could be smaller
 234   // than during a production run. We cannot use AOT code in that case
 235   // because branch and call instructions would be incorrect.
 236   //
 237   // Increase the code cache size to guarantee that the total size
 238   // is bigger during the assembly phase.
 239   if (AOTCodeCache::maybe_dumping_code()) {
 240     cache_size += align_up(compiler_buffer_size, min_size);
 241     cache_size = MIN2(cache_size, CODE_CACHE_SIZE_LIMIT);
 242   }
 243 
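A minimal model of the dump-time bump above, with align_up and MIN2 written out (the cap and sizes are illustrative, not the real CODE_CACHE_SIZE_LIMIT or flag values):

#include <cassert>
#include <cstddef>

static size_t align_up_sz(size_t v, size_t a) { return (v + a - 1) / a * a; }

int main() {
  const size_t K = 1024, M = 1024 * K;
  size_t cache_size = 240 * M;
  size_t compiler_buffer_size = 2 * M + 100 * K;  // illustrative
  size_t min_size = 64 * K;
  size_t limit = 2048 * M;                        // illustrative cap

  cache_size += align_up_sz(compiler_buffer_size, min_size);  // round up, add
  cache_size = cache_size < limit ? cache_size : limit;       // MIN2 clamp

  assert(cache_size == 240 * M + 2 * M + 128 * K);  // 100K rounded up to 128K
  return 0;
}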
 244   if (!non_nmethod.set) {
 245     non_nmethod.size += compiler_buffer_size;
 246   }
 247 
 248   if (!profiled.set && !non_profiled.set) {
 249     non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
 250                                         (cache_size - non_nmethod.size) / 2 : min_size;
 251   }
 252 
 253   if (profiled.set && !non_profiled.set) {
 254     set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
 255   }
 256 
 257   if (!profiled.set && non_profiled.set) {
 258     set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
 259   }
 260 
 261   // Compatibility.
 262   size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
 263   if (!non_nmethod.set && profiled.set && non_profiled.set) {

 349     message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);
 350 
 351     vm_exit_during_initialization("Invalid code heap sizes", message);
 352   }
 353 
 354   // Compatibility. Print warning if using large pages but not able to use the size given
 355   if (UseLargePages) {
 356     const size_t lg_ps = page_size(false, 1);
 357     if (ps < lg_ps) {
 358       log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
 359                              "Reverting to smaller page size (" PROPERFMT ").",
 360                              PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
 361     }
 362   }
 363 
 364   FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
 365   FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
 366   FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
 367   FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
 368 
 369   const size_t cds_code_size = 0;
 370   // FIXME: we should not increase CodeCache size - it affects branches.
 371   // Instead we need to create separate code heap in CodeCache for AOT code.
 372   // const size_t cds_code_size = align_up(AOTCacheAccess::get_aot_code_region_size(), min_size);
 373   // cache_size += cds_code_size;
 374 
 375   ReservedSpace rs = reserve_heap_memory(cache_size, ps);
 376 
 377   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
 378   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 379 
 380   size_t offset = 0;
 381   if (cds_code_size > 0) {
 382     // FIXME: use CodeHeapInfo for this hack ...
 383     _cds_code_space = rs.partition(offset, cds_code_size);
 384     offset += cds_code_size;
 385   }
 386 
 387   if (profiled.enabled) {
 388     ReservedSpace profiled_space = rs.partition(offset, profiled.size);
 389     offset += profiled.size;
 390     // Tier 2 and tier 3 (profiled) methods
 391     add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
 392   }
 393 
 394   ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
 395   offset += non_nmethod.size;
 396   // Non-nmethods (stubs, adapters, ...)
 397   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
 398 
 399   if (non_profiled.enabled) {
 400     ReservedSpace non_profiled_space  = rs.partition(offset, non_profiled.size);
 401     // Tier 1 and tier 4 (non-profiled) methods and native methods
 402     add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
 403   }
 404 }
 405 
 406 void* CodeCache::map_aot_code() {
 407   if (_cds_code_space.size() > 0 && AOTCacheAccess::map_aot_code_region(_cds_code_space)) {
 408     return _cds_code_space.base();
 409   } else {
 410     return nullptr;
 411   }
 412 }
 413 
 414 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
 415   return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
 416                    os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
 417 }
 418 
 419 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) {
 420   // Align and reserve space for code cache
 421   const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity());
 422   const size_t rs_size = align_up(size, rs_align);
 423 
 424   ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps);
 425   if (!rs.is_reserved()) {
 426     vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)",
 427                                           rs_size/K));
 428   }
 429 
 430   // Initialize bounds
 431   _low_bound = (address)rs.base();
 432   _high_bound = _low_bound + rs.size();
 433   return rs;

1111 size_t CodeCache::max_capacity() {
1112   size_t max_cap = 0;
1113   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1114     max_cap += (*heap)->max_capacity();
1115   }
1116   return max_cap;
1117 }
1118 
1119 bool CodeCache::is_non_nmethod(address addr) {
1120   CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1121   return blob->contains(addr);
1122 }
1123 
1124 size_t CodeCache::max_distance_to_non_nmethod() {
1125   if (!SegmentedCodeCache) {
1126     return ReservedCodeCacheSize;
1127   } else {
1128     CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod);
1129     // the max distance is minimized by placing the NonNMethod segment
1130     // in between MethodProfiled and MethodNonProfiled segments
1131     size_t dist1 = (size_t)blob->high_boundary() - (size_t)_low_bound;
1132     size_t dist2 = (size_t)_high_bound - (size_t)blob->low_boundary();
1133     return dist1 > dist2 ? dist1 : dist2;
1134   }
1135 }
1136 
1137 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
1138 // is free, reverse_free_ratio() returns 4.
1139 // Since the code heap for each type of code blob falls forward to the
1140 // next type of code heap, return the reverse free ratio for the entire
1141 // code cache.
1142 double CodeCache::reverse_free_ratio() {
1143   double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0;
1144   double max = (double)max_capacity();
1145   double result = max / unallocated;
1146   assert (max >= unallocated, "Must be");
1147   assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
1148   return result;
1149 }
1150 
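Spelling out the 25%-free example from the comment (capacities in arbitrary units):

#include <cassert>

int main() {
  double max_capacity = 240.0;  // illustrative total capacity
  double unallocated  = 60.0;   // 25% of the cache is free
  double reverse_free_ratio = max_capacity / unallocated;
  assert(reverse_free_ratio == 4.0);  // exact for these values
  return 0;
}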
1151 size_t CodeCache::bytes_allocated_in_freelists() {
1152   size_t allocated_bytes = 0;

1257                             AnyObj::RESOURCE_AREA, mtInternal,
1258                             &DependencySignature::hash,
1259                             &DependencySignature::equals> DepTable;
1260 
1261   DepTable* table = new DepTable();
1262 
1263   // Iterate over live nmethods and check dependencies of all nmethods that are not
1264   // marked for deoptimization. A particular dependency is only checked once.
1265   NMethodIterator iter(NMethodIterator::not_unloading);
1266   while(iter.next()) {
1267     nmethod* nm = iter.method();
1268     // Only notify for live nmethods
1269     if (!nm->is_marked_for_deoptimization()) {
1270       for (Dependencies::DepStream deps(nm); deps.next(); ) {
1271         // Construct abstraction of a dependency.
1272         DependencySignature* current_sig = new DependencySignature(deps);
1273 
1274         // Determine if dependency is already checked. table->put(...) returns
1275         // 'true' if the dependency is added (i.e., was not in the hashtable).
1276         if (table->put(*current_sig, 1)) {
1277           Klass* witness = deps.check_dependency();
1278           if (witness != nullptr) {
1279             // Dependency checking failed. Print out information about the failed
1280             // dependency and finally fail with an assert. We can fail here, since
1281             // dependency checking is never done in a product build.
1282             deps.print_dependency(tty, witness, true);
1283             changes.print();
1284             nm->print();
1285             nm->print_dependencies_on(tty);
1286             assert(false, "Should have been marked for deoptimization");
1287           }
1288         }
1289       }
1290     }
1291   }
1292 }
1293 #endif
1294 
1295 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
1296   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1297 
1298   // search the hierarchy looking for nmethods which are affected by the loading of this class
1299 
1300   // then search the interfaces this class implements looking for nmethods
1301   // which might be dependent on the fact that an interface only had one
1302   // implementor.
1303   // nmethod::check_all_dependencies works correctly only if no safepoint
1304   // can happen.
1305   NoSafepointVerifier nsv;
1306   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
1307     InstanceKlass* d = str.klass();
1308     {
1309       LogStreamHandle(Trace, dependencies) log;
1310       if (log.is_enabled()) {
1311         log.print("Processing context ");
1312         d->name()->print_value_on(&log);
1313       }
1314     }
1315     d->mark_dependent_nmethods(deopt_scope, changes);
1316   }
1317 
1318 #ifndef PRODUCT
1319   if (VerifyDependencies) {
1320     // Object pointers are used as unique identifiers for dependency arguments. This
1321     // is only possible if no safepoint, i.e., no GC, occurs during the verification code.
1322     dependentCheckTime.start();
1323     check_live_nmethods_dependencies(changes);
1324     dependentCheckTime.stop();
1325   }
1326 #endif
1327 }
1328 
1329 #if INCLUDE_JVMTI
1330 // RedefineClasses support for saving nmethods that are dependent on "old" methods.
1331 // We don't really expect this table to grow very large.  If it does, it can become a hashtable.
1332 static GrowableArray<nmethod*>* old_nmethod_table = nullptr;
1333 
1334 static void add_to_old_table(nmethod* c) {

1553 }
1554 PRAGMA_DIAG_POP
1555 
1556 void CodeCache::print_memory_overhead() {
1557   size_t wasted_bytes = 0;
1558   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1559       CodeHeap* curr_heap = *heap;
1560       for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) {
1561         HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
1562         wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
1563       }
1564   }
1565   // Print bytes that are allocated in the freelist
1566   ttyLocker ttl;
1567   tty->print_cr("Number of elements in freelist: %zd",       freelists_length());
1568   tty->print_cr("Allocated in freelist:          %zdkB",  bytes_allocated_in_freelists()/K);
1569   tty->print_cr("Unused bytes in CodeBlobs:      %zdkB",  (wasted_bytes/K));
1570   tty->print_cr("Segment map size:               %zdkB",  allocated_segments()/K); // 1 byte per segment
1571 }
1572 
1573 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) {
1574   if (total > 0) {
1575     double ratio = (100.0 * used) / total;
1576     st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio);
1577   }
1578 }
1579 
1580 void CodeCache::print_nmethod_statistics_on(outputStream* st) {
1581   int stats     [2][6][3][2] = {0};
1582   int stats_used[2][6][3][2] = {0};
1583 
1584   int total_osr = 0;
1585   int total_entrant = 0;
1586   int total_non_entrant = 0;
1587   int total_other = 0;
1588   int total_used = 0;
1589 
1590   NMethodIterator iter(NMethodIterator::all);
1591   while (iter.next()) {
1592     nmethod* nm = iter.method();
1593     if (nm->is_in_use()) {
1594       ++total_entrant;
1595     } else if (nm->is_not_entrant()) {
1596       ++total_non_entrant;
1597     } else {
1598       ++total_other;
1599     }
1600     if (nm->is_osr_method()) {
1601       ++total_osr;
1602     }
1603     if (nm->used()) {
1604       ++total_used;
1605     }
1606     assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, "");
1607 
1608     int idx1 = nm->is_aot() ? 1 : 0;
1609     int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0);
1610     int idx3 = (nm->is_in_use()      ? 0 :
1611                (nm->is_not_entrant() ? 1 :
1612                                        2));
1613     int idx4 = (nm->is_osr_method() ? 1 : 0);
1614     stats[idx1][idx2][idx3][idx4] += 1;
1615     if (nm->used()) {
1616       stats_used[idx1][idx2][idx3][idx4] += 1;
1617     }
1618   }
1619 
1620   st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
1621                total_entrant + total_non_entrant + total_other,
1622                total_entrant, total_non_entrant, total_osr);
1623   if (total_other > 0) {
1624     st->print("; %d other", total_other);
1625   }
1626   st->print_cr(")");
1627 
1628   for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
1629     int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
1630     int total_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
1631     if (total_normal + total_osr > 0) {
1632       st->print("  Tier%d:", i);
1633       print_helper1(st,      "", total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
1634       print_helper1(st, "; osr:", total_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
1635       st->cr();
1636     }
1637   }
1638   st->cr();
1639   for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
1640     int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
1641     int total_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
1642     assert(total_osr == 0, "sanity");
1643     if (total_normal + total_osr > 0) {
1644       st->print("  AOT Code T%d:", i);
1645       print_helper1(st,      "", total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
1646       print_helper1(st, "; osr:", total_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
1647       st->cr();
1648     }
1649   }
1650 }
1651 
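For readers decoding the counters above: the indices are [is_aot][comp_level, +1 if preloaded][entrant state][is_osr]. A stand-alone rendering of the index computation (the level constant is an illustrative stand-in for CompLevel_full_optimization):

#include <cassert>

int main() {
  int stats[2][6][3][2] = {};

  // An in-use, non-OSR, preloaded AOT method at full optimization:
  bool is_aot = true, preloaded = true, in_use = true, not_entrant = false, is_osr = false;
  int comp_level = 4;  // stand-in for CompLevel_full_optimization

  int idx1 = is_aot ? 1 : 0;
  int idx2 = comp_level + (preloaded ? 1 : 0);  // preloaded shifts one bucket up
  int idx3 = in_use ? 0 : (not_entrant ? 1 : 2);
  int idx4 = is_osr ? 1 : 0;
  stats[idx1][idx2][idx3][idx4] += 1;

  assert(stats[1][5][0][0] == 1);  // reported on the "AOT Code T5" row
  return 0;
}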
1652 //------------------------------------------------------------------------------------------------
1653 // Non-product version
1654 
1655 #ifndef PRODUCT
1656 
1657 void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
1658   if (PrintCodeCache2) {  // Need to add a new flag
1659     ResourceMark rm;
1660     if (size == 0) {
1661       int s = cb->size();
1662       assert(s >= 0, "CodeBlob size is negative: %d", s);
1663       size = (uint) s;
1664     }
1665     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1666   }
1667 }
1668 
1669 void CodeCache::print_internals() {
1670   int nmethodCount = 0;
1671   int runtimeStubCount = 0;
1672   int upcallStubCount = 0;
1673   int adapterCount = 0;
1674   int mhAdapterCount = 0;
1675   int vtableBlobCount = 0;
1676   int deoptimizationStubCount = 0;
1677   int uncommonTrapStubCount = 0;
1678   int exceptionStubCount = 0;
1679   int safepointStubCount = 0;
1680   int bufferBlobCount = 0;
1681   int total = 0;
1682   int nmethodNotEntrant = 0;
1683   int nmethodJava = 0;
1684   int nmethodNative = 0;
1685   int max_nm_size = 0;
1686   ResourceMark rm;
1687 
1688   int i = 0;
1689   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1690     int heap_total = 0;
1691     tty->print_cr("-- %s --", (*heap)->name());

1692     FOR_ALL_BLOBS(cb, *heap) {
1693       total++;
1694       heap_total++;
1695       if (cb->is_nmethod()) {
1696         nmethod* nm = (nmethod*)cb;
1697 
1698         tty->print("%4d: ", heap_total);
1699         CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);




1700 
1701         nmethodCount++;
1702 
1703         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1704         if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }
1705 
1706         if(nm->method() != nullptr && nm->is_java_method()) {
1707           nmethodJava++;
1708           max_nm_size = MAX2(max_nm_size, nm->size());
1709         }
1710       } else if (cb->is_runtime_stub()) {
1711         runtimeStubCount++;
1712       } else if (cb->is_upcall_stub()) {
1713         upcallStubCount++;
1714       } else if (cb->is_deoptimization_stub()) {
1715         deoptimizationStubCount++;
1716       } else if (cb->is_uncommon_trap_stub()) {
1717         uncommonTrapStubCount++;
1718       } else if (cb->is_exception_stub()) {
1719         exceptionStubCount++;

1876       FOR_ALL_BLOBS(cb, *heap) {
1877         number_of_blobs++;
1878         code_size += cb->code_size();
1879         ImmutableOopMapSet* set = cb->oop_maps();
1880         if (set != nullptr) {
1881           number_of_oop_maps += set->count();
1882           map_size           += set->nr_of_bytes();
1883         }
1884       }
1885     }
1886     tty->print_cr("OopMaps");
1887     tty->print_cr("  #blobs    = %d", number_of_blobs);
1888     tty->print_cr("  code size = %d", code_size);
1889     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1890     tty->print_cr("  map size  = %d", map_size);
1891   }
1892 
1893 #endif // !PRODUCT
1894 }
1895 
1896 void CodeCache::print_nmethods_on(outputStream* st) {
1897   ResourceMark rm;
1898   int i = 0;
1899   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1900     st->print_cr("-- %s --", (*heap)->name());
1901     FOR_ALL_BLOBS(cb, *heap) {
1902       i++;
1903       if (cb->is_nmethod()) {
1904         nmethod* nm = (nmethod*)cb;
1905         st->print("%4d: ", i);
1906         CompileTask::print(st, nm, nullptr, true, false);
1907 
1908         const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
1909         st->print_cr(" %c", non_entrant_char);
1910       }
1911     }
1912   }
1913 }
1914 
1915 void CodeCache::print_summary(outputStream* st, bool detailed) {
1916   int full_count = 0;
1917   julong total_used = 0;
1918   julong total_max_used = 0;
1919   julong total_free = 0;
1920   julong total_size = 0;
1921   FOR_ALL_HEAPS(heap_iterator) {
1922     CodeHeap* heap = (*heap_iterator);
1923     size_t total = (heap->high_boundary() - heap->low_boundary());
1924     if (_heaps->length() >= 1) {
1925       st->print("%s:", heap->name());
1926     } else {
1927       st->print("CodeCache:");
1928     }
1929     size_t size = total/K;
1930     size_t used = (total - heap->unallocated_capacity())/K;
1931     size_t max_used = heap->max_allocated_capacity()/K;
1932     size_t free = heap->unallocated_capacity()/K;
1933     total_size += size;
1934     total_used += used;