1 /*
  2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "cds/archiveBuilder.hpp"
 27 #include "cds/cdsConfig.hpp"
 28 #include "cds/dynamicArchive.hpp"
 29 #include "classfile/altHashing.hpp"
 30 #include "classfile/classLoaderData.hpp"
 31 #include "classfile/compactHashtable.hpp"
 32 #include "classfile/javaClasses.hpp"
 33 #include "classfile/symbolTable.hpp"
 34 #include "memory/allocation.inline.hpp"
 35 #include "memory/metaspaceClosure.hpp"
 36 #include "memory/resourceArea.hpp"
 37 #include "oops/oop.inline.hpp"
 38 #include "runtime/atomic.hpp"
 39 #include "runtime/interfaceSupport.inline.hpp"
 40 #include "runtime/timerTrace.hpp"
 41 #include "runtime/trimNativeHeap.hpp"
 42 #include "services/diagnosticCommand.hpp"
 43 #include "utilities/concurrentHashTable.inline.hpp"
 44 #include "utilities/concurrentHashTableTasks.inline.hpp"
 45 #include "utilities/utf8.hpp"
 46 
// We used to not resize at all, so let's be conservative
// and not set it too short before we decide to resize,
// to match previous startup behavior
const double PREF_AVG_LIST_LEN = 8.0;
// 2^24 is max size, like StringTable.
const size_t END_SIZE = 24;
// If a chain gets to 100 something might be wrong
const size_t REHASH_LEN = 100;

// Size (in chars) of the on-stack buffer used for UTF-16 -> UTF-8 conversion;
// longer names fall back to a resource-area allocation.
const size_t ON_STACK_BUFFER_LENGTH = 128;
 57 
 58 // --------------------------------------------------------------------------
 59 
 60 inline bool symbol_equals_compact_hashtable_entry(Symbol* value, const char* key, int len) {
 61   if (value->equals(key, len)) {
 62     return true;
 63   } else {
 64     return false;
 65   }
 66 }
 67 
// Shared (CDS) symbol tables: the static-archive table, the dynamic-archive
// table, and a scratch table populated while dumping a new archive.
static OffsetCompactHashtable<
  const char*, Symbol*,
  symbol_equals_compact_hashtable_entry
> _shared_table, _dynamic_shared_table, _shared_table_for_dumping;
 72 
 73 // --------------------------------------------------------------------------
 74 
 75 typedef ConcurrentHashTable<SymbolTableConfig, mtSymbol> SymbolTableHash;
 76 static SymbolTableHash* _local_table = nullptr;
 77 
 78 volatile bool SymbolTable::_has_work = 0;
 79 volatile bool SymbolTable::_needs_rehashing = false;
 80 
 81 // For statistics
 82 static size_t _symbols_removed = 0;
 83 static size_t _symbols_counted = 0;
 84 static size_t _current_size = 0;
 85 
 86 static volatile size_t _items_count = 0;
 87 static volatile bool   _has_items_to_clean = false;
 88 
 89 
 90 static volatile bool _alt_hash = false;
 91 
 92 #ifdef USE_LIBRARY_BASED_TLS_ONLY
 93 static volatile bool _lookup_shared_first = false;
 94 #else
 95 // "_lookup_shared_first" can get highly contended with many cores if multiple threads
 96 // are updating "lookup success history" in a global shared variable. If built-in TLS is available, use it.
 97 static THREAD_LOCAL bool _lookup_shared_first = false;
 98 #endif
 99 
100 // Static arena for symbols that are not deallocated
101 Arena* SymbolTable::_arena = nullptr;
102 
103 static bool _rehashed = false;
104 static uint64_t _alt_hash_seed = 0;
105 
// Logs 'msg' together with the symbol's quoted ASCII form at trace level.
// Compiled out (no-op) in PRODUCT builds.
static inline void log_trace_symboltable_helper(Symbol* sym, const char* msg) {
#ifndef PRODUCT
  ResourceMark rm;
  log_trace(symboltable)("%s [%s]", msg, sym->as_quoted_ascii());
#endif // PRODUCT
}
112 
113 // Pick hashing algorithm.
114 static unsigned int hash_symbol(const char* s, int len, bool useAlt) {
115   return useAlt ?
116   AltHashing::halfsiphash_32(_alt_hash_seed, (const uint8_t*)s, len) :
117   java_lang_String::hash_code((const jbyte*)s, len);
118 }
119 
#if INCLUDE_CDS
// Shared (CDS) tables are always built with the default String hash, never
// the alternate algorithm, so lookups against them must use this function.
static unsigned int hash_shared_symbol(const char* s, int len) {
  return java_lang_String::hash_code((const jbyte*)s, len);
}
#endif
125 
// Policy class for the ConcurrentHashTable holding the Symbols: supplies
// hashing, liveness detection, and node allocation/deallocation.
class SymbolTableConfig : public AllStatic {

public:
  typedef Symbol Value;  // value of the Node in the hashtable

  // Returns the hash of 'value'; flags it dead (and returns 0) when its
  // refcount has dropped to zero so the table can unlink it.
  static uintx get_hash(Value const& value, bool* is_dead) {
    *is_dead = (value.refcount() == 0);
    if (*is_dead) {
      return 0;
    } else {
      return hash_symbol((const char*)value.bytes(), value.utf8_length(), _alt_hash);
    }
  }
  // We use default allocation/deallocation but counted
  static void* allocate_node(void* context, size_t size, Value const& value) {
    SymbolTable::item_added();
    return allocate_node_impl(size, value);
  }
  static void free_node(void* context, void* memory, Value & value) {
    // We get here because #1 some threads lost a race to insert a newly created Symbol
    // or #2 we're cleaning up unused symbol.
    // If #1, then the symbol can be either permanent,
    // or regular newly created one (refcount==1)
    // If #2, then the symbol is dead (refcount==0)
    assert(value.is_permanent() || (value.refcount() == 1) || (value.refcount() == 0),
           "refcount %d", value.refcount());
#if INCLUDE_CDS
    if (CDSConfig::is_dumping_static_archive()) {
      // We have allocated with MetaspaceShared::symbol_space_alloc(). No deallocation is needed.
      // Unreferenced Symbols will not be copied into the archive.
      return;
    }
#endif
    // Drop the insert-loser's own reference (case #1, non-permanent).
    if (value.refcount() == 1) {
      value.decrement_refcount();
      assert(value.refcount() == 0, "expected dead symbol");
    }
    if (value.refcount() != PERM_REFCOUNT) {
      FreeHeap(memory);
    } else {
      MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
      // Deleting permanent symbol should not occur very often (insert race condition),
      // so log it.
      log_trace_symboltable_helper(&value, "Freeing permanent symbol");
      size_t alloc_size = SymbolTableHash::get_dynamic_node_size(value.byte_size());
      if (!SymbolTable::arena()->Afree(memory, alloc_size)) {
        // Afree only reclaims the most recent allocation; otherwise the space leaks.
        log_trace_symboltable_helper(&value, "Leaked permanent symbol");
      }
    }
    SymbolTable::item_removed();
  }

private:
  static void* allocate_node_impl(size_t size, Value const& value) {
    size_t alloc_size = SymbolTableHash::get_dynamic_node_size(value.byte_size());
#if INCLUDE_CDS
    if (CDSConfig::is_dumping_static_archive()) {
      MutexLocker ml(DumpRegion_lock, Mutex::_no_safepoint_check_flag);
      // To get deterministic output from -Xshare:dump, we ensure that Symbols are allocated in
      // increasing addresses. When the symbols are copied into the archive, we preserve their
      // relative address order (sorted, see ArchiveBuilder::gather_klasses_and_symbols).
      //
      // We cannot use arena because arena chunks are allocated by the OS. As a result, for example,
      // the archived symbol of "java/lang/Object" may sometimes be lower than "java/lang/String", and
      // sometimes be higher. This would cause non-deterministic contents in the archive.
      DEBUG_ONLY(static void* last = 0);
      void* p = (void*)MetaspaceShared::symbol_space_alloc(alloc_size);
      assert(p > last, "must increase monotonically");
      DEBUG_ONLY(last = p);
      return p;
    }
#endif
    if (value.refcount() != PERM_REFCOUNT) {
      return AllocateHeap(alloc_size, mtSymbol);
    } else {
      // Allocate to global arena
      MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
      return SymbolTable::arena()->Amalloc(alloc_size);
    }
  }
};
207 
208 void SymbolTable::create_table ()  {
209   size_t start_size_log_2 = ceil_log2(SymbolTableSize);
210   _current_size = ((size_t)1) << start_size_log_2;
211   log_trace(symboltable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
212                          _current_size, start_size_log_2);
213   _local_table = new SymbolTableHash(start_size_log_2, END_SIZE, REHASH_LEN, true);
214 
215   // Initialize the arena for global symbols, size passed in depends on CDS.
216   if (symbol_alloc_arena_size == 0) {
217     _arena = new (mtSymbol) Arena(mtSymbol);
218   } else {
219     _arena = new (mtSymbol) Arena(mtSymbol, Arena::Tag::tag_other, symbol_alloc_arena_size);
220   }
221 }
222 
// Flag tracking whether cleanup found dead entries worth removing.
void SymbolTable::reset_has_items_to_clean() { Atomic::store(&_has_items_to_clean, false); }
void SymbolTable::mark_has_items_to_clean()  { Atomic::store(&_has_items_to_clean, true); }
bool SymbolTable::has_items_to_clean()       { return Atomic::load(&_has_items_to_clean); }

void SymbolTable::item_added() {
  Atomic::inc(&_items_count);
}

void SymbolTable::item_removed() {
  // _symbols_removed is gathered for statistics only.
  Atomic::inc(&(_symbols_removed));
  Atomic::dec(&_items_count);
}

// Average number of items per bucket in the local table.
double SymbolTable::get_load_factor() {
  return (double)_items_count/(double)_current_size;
}

size_t SymbolTable::table_size() {
  return ((size_t)1) << _local_table->get_size_log2(Thread::current());
}

bool SymbolTable::has_work() { return Atomic::load_acquire(&_has_work); }

// Requests asynchronous cleanup/resize work on the ServiceThread.
void SymbolTable::trigger_cleanup() {
  // Avoid churn on ServiceThread
  if (!has_work()) {
    MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
    _has_work = true;
    Service_lock->notify_all();
  }
}
254 
255 class SymbolsDo : StackObj {
256   SymbolClosure *_cl;
257 public:
258   SymbolsDo(SymbolClosure *cl) : _cl(cl) {}
259   bool operator()(Symbol* value) {
260     assert(value != nullptr, "expected valid value");
261     _cl->do_symbol(&value);
262     return true;
263   };
264 };
265 
266 class SharedSymbolIterator {
267   SymbolClosure* _symbol_closure;
268 public:
269   SharedSymbolIterator(SymbolClosure* f) : _symbol_closure(f) {}
270   void do_value(Symbol* symbol) {
271     _symbol_closure->do_symbol(&symbol);
272   }
273 };
274 
275 // Call function for all symbols in the symbol table.
// Call function for all symbols in the symbol table.
void SymbolTable::symbols_do(SymbolClosure *cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  // all symbols from shared table
  SharedSymbolIterator iter(cl);
  _shared_table.iterate(&iter);
  _dynamic_shared_table.iterate(&iter);

  // all symbols from the dynamic table
  SymbolsDo sd(cl);
  _local_table->do_safepoint_scan(sd);
}

// Call function for all symbols in shared table. Used by -XX:+PrintSharedArchiveAndExit
void SymbolTable::shared_symbols_do(SymbolClosure *cl) {
  SharedSymbolIterator iter(cl);
  _shared_table.iterate(&iter);
  _dynamic_shared_table.iterate(&iter);
}
294 
// Looks up 'name' in the local (non-shared) table; returns nullptr if absent.
// A successful lookup returns the symbol with a non-zero refcount.
Symbol* SymbolTable::lookup_dynamic(const char* name,
                                    int len, unsigned int hash) {
  Symbol* sym = do_lookup(name, len, hash);
  assert((sym == nullptr) || sym->refcount() != 0, "refcount must not be zero");
  return sym;
}

#if INCLUDE_CDS
// Looks up 'name' in the CDS tables: static archive first, then (if mapped)
// the dynamic archive. Returns nullptr when not found.
Symbol* SymbolTable::lookup_shared(const char* name,
                                   int len, unsigned int hash) {
  Symbol* sym = nullptr;
  if (!_shared_table.empty()) {
    if (_alt_hash) {
      // hash_code parameter may use alternate hashing algorithm but the shared table
      // always uses the same original hash code.
      hash = hash_shared_symbol(name, len);
    }
    sym = _shared_table.lookup(name, hash, len);
    if (sym == nullptr && DynamicArchive::is_mapped()) {
      sym = _dynamic_shared_table.lookup(name, hash, len);
    }
  }
  return sym;
}
#endif
319 #endif
320 
// Looks up 'name' in both the shared and the local table. The per-thread
// flag _lookup_shared_first remembers which table satisfied the last lookup
// and is probed first next time, as a cheap locality heuristic.
Symbol* SymbolTable::lookup_common(const char* name,
                            int len, unsigned int hash) {
  Symbol* sym;
  if (_lookup_shared_first) {
    sym = lookup_shared(name, len, hash);
    if (sym == nullptr) {
      _lookup_shared_first = false;
      sym = lookup_dynamic(name, len, hash);
    }
  } else {
    sym = lookup_dynamic(name, len, hash);
    if (sym == nullptr) {
      sym = lookup_shared(name, len, hash);
      if (sym != nullptr) {
        _lookup_shared_first = true;
      }
    }
  }
  return sym;
}
341 
// Finds or creates the Symbol for the utf8 string (name, len); the returned
// symbol's refcount has been incremented on the caller's behalf.
Symbol* SymbolTable::new_symbol(const char* name, int len) {
  assert(len <= Symbol::max_length(), "sanity");
  unsigned int hash = hash_symbol(name, len, _alt_hash);
  Symbol* sym = lookup_common(name, len, hash);
  if (sym == nullptr) {
    sym = do_add_if_needed(name, len, hash, /* is_permanent */ false);
  }
  assert(sym->refcount() != 0, "lookup should have incremented the count");
  assert(sym->equals(name, len), "symbol must be properly initialized");
  return sym;
}

// Finds or creates the Symbol for the substring [begin, end) of 'sym'.
Symbol* SymbolTable::new_symbol(const Symbol* sym, int begin, int end) {
  assert(begin <= end && end <= sym->utf8_length(), "just checking");
  assert(sym->refcount() != 0, "require a valid symbol");
  const char* name = (const char*)sym->base() + begin;
  int len = end - begin;
  assert(len <= Symbol::max_length(), "sanity");
  unsigned int hash = hash_symbol(name, len, _alt_hash);
  Symbol* found = lookup_common(name, len, hash);
  if (found == nullptr) {
    found = do_add_if_needed(name, len, hash, /* is_permanent */ false);
  }
  return found;
}
367 
// Lookup functor for ConcurrentHashTable::get/insert: carries the key
// (utf8 string + precomputed hash) and the match/liveness predicates.
class SymbolTableLookup : StackObj {
private:
  uintx _hash;
  int _len;
  const char* _str;
public:
  SymbolTableLookup(const char* key, int len, uintx hash)
  : _hash(hash), _len(len), _str(key) {}
  uintx get_hash() const {
    return _hash;
  }
  // Note: When equals() returns "true", the symbol's refcount is incremented. This is
  // needed to ensure that the symbol is kept alive before equals() returns to the caller,
  // so that another thread cannot clean the symbol up concurrently. The caller is
  // responsible for decrementing the refcount, when the symbol is no longer needed.
  bool equals(Symbol* value) {
    assert(value != nullptr, "expected valid value");
    Symbol *sym = value;
    if (sym->equals(_str, _len)) {
      if (sym->try_increment_refcount()) {
        // something is referencing this symbol now.
        return true;
      } else {
        // try_increment_refcount failed: the symbol is being freed concurrently.
        assert(sym->refcount() == 0, "expected dead symbol");
        return false;
      }
    } else {
      return false;
    }
  }
  bool is_dead(Symbol* value) {
    return value->refcount() == 0;
  }
};
402 
403 class SymbolTableGet : public StackObj {
404   Symbol* _return;
405 public:
406   SymbolTableGet() : _return(nullptr) {}
407   void operator()(Symbol* value) {
408     assert(value != nullptr, "expected valid value");
409     _return = value;
410   }
411   Symbol* get_res_sym() const {
412     return _return;
413   }
414 };
415 
416 void SymbolTable::update_needs_rehash(bool rehash) {
417   if (rehash) {
418     _needs_rehashing = true;
419     trigger_cleanup();
420   }
421 }
422 
// Probes the local concurrent table for (name, len, hash). The lookup functor
// increments the refcount of a matching symbol before it is returned.
Symbol* SymbolTable::do_lookup(const char* name, int len, uintx hash) {
  Thread* thread = Thread::current();
  SymbolTableLookup lookup(name, len, hash);
  SymbolTableGet stg;
  bool rehash_warning = false;
  _local_table->get(thread, lookup, stg, &rehash_warning);
  update_needs_rehash(rehash_warning);
  Symbol* sym = stg.get_res_sym();
  assert((sym == nullptr) || sym->refcount() != 0, "found dead symbol");
  return sym;
}

// Lookup without inserting; also returns the computed hash via 'hash' so
// callers can pass it to a subsequent add without re-hashing.
Symbol* SymbolTable::lookup_only(const char* name, int len, unsigned int& hash) {
  hash = hash_symbol(name, len, _alt_hash);
  return lookup_common(name, len, hash);
}
439 
440 // Suggestion: Push unicode-based lookup all the way into the hashing
441 // and probing logic, so there is no need for convert_to_utf8 until
442 // an actual new Symbol* is created.
443 Symbol* SymbolTable::new_symbol(const jchar* name, int utf16_length) {
444   int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
445   char stack_buf[ON_STACK_BUFFER_LENGTH];
446   if (utf8_length < (int) sizeof(stack_buf)) {
447     char* chars = stack_buf;
448     UNICODE::convert_to_utf8(name, utf16_length, chars);
449     return new_symbol(chars, utf8_length);
450   } else {
451     ResourceMark rm;
452     char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
453     UNICODE::convert_to_utf8(name, utf16_length, chars);
454     return new_symbol(chars, utf8_length);
455   }
456 }
457 
458 Symbol* SymbolTable::lookup_only_unicode(const jchar* name, int utf16_length,
459                                          unsigned int& hash) {
460   int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
461   char stack_buf[ON_STACK_BUFFER_LENGTH];
462   if (utf8_length < (int) sizeof(stack_buf)) {
463     char* chars = stack_buf;
464     UNICODE::convert_to_utf8(name, utf16_length, chars);
465     return lookup_only(chars, utf8_length, hash);
466   } else {
467     ResourceMark rm;
468     char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
469     UNICODE::convert_to_utf8(name, utf16_length, chars);
470     return lookup_only(chars, utf8_length, hash);
471   }
472 }
473 
// Batch-creates symbols for a constant pool being parsed and stores each one
// at its constant-pool index. Hashes were precomputed by the caller.
void SymbolTable::new_symbols(ClassLoaderData* loader_data, const constantPoolHandle& cp,
                              int names_count, const char** names, int* lengths,
                              int* cp_indices, unsigned int* hashValues) {
  // Note that is_permanent will be false for non-strong hidden classes.
  // even if their loader is the boot loader because they will have a different cld.
  bool is_permanent = loader_data->is_the_null_class_loader_data();
  for (int i = 0; i < names_count; i++) {
    const char *name = names[i];
    int len = lengths[i];
    unsigned int hash = hashValues[i];
    assert(lookup_shared(name, len, hash) == nullptr, "must have checked already");
    Symbol* sym = do_add_if_needed(name, len, hash, is_permanent);
    assert(sym->refcount() != 0, "lookup should have incremented the count");
    cp->symbol_at_put(cp_indices[i], sym);
  }
}
490 
// Inserts (name, len) into the local table unless another thread beats us to
// it, and returns the winning Symbol with a reference held for the caller.
// The temporary Symbol is built in resource memory; on successful insert the
// table copies it into a node via SymbolTableConfig::allocate_node.
Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, bool is_permanent) {
  SymbolTableLookup lookup(name, len, hash);
  SymbolTableGet stg;
  bool clean_hint = false;
  bool rehash_warning = false;
  Thread* current = Thread::current();
  Symbol* sym;

  ResourceMark rm(current);
  const int alloc_size = Symbol::byte_size(len);
  u1* u1_buf = NEW_RESOURCE_ARRAY_IN_THREAD(current, u1, alloc_size);
  // Symbols are permanent while dumping the static archive (never freed).
  Symbol* tmp = ::new ((void*)u1_buf) Symbol((const u1*)name, len,
                                             (is_permanent || CDSConfig::is_dumping_static_archive()) ? PERM_REFCOUNT : 1);

  do {
    if (_local_table->insert(current, lookup, *tmp, &rehash_warning, &clean_hint)) {
      if (_local_table->get(current, lookup, stg, &rehash_warning)) {
        sym = stg.get_res_sym();
        // The get adds one to ref count, but we inserted with our ref already included.
        // Therefore decrement with one.
        if (sym->refcount() != PERM_REFCOUNT) {
          sym->decrement_refcount();
        }
        break;
      }
    }

    // In case another thread did a concurrent add, return value already in the table.
    // This could fail if the symbol got deleted concurrently, so loop back until success.
    if (_local_table->get(current, lookup, stg, &rehash_warning)) {
      // The lookup added a refcount, which is ours.
      sym = stg.get_res_sym();
      break;
    }
  } while(true);

  update_needs_rehash(rehash_warning);

  if (clean_hint) {
    // The insert found dead entries on its way; schedule concurrent cleanup.
    mark_has_items_to_clean();
    check_concurrent_work();
  }

  assert((sym == nullptr) || sym->refcount() != 0, "found dead symbol");
  return sym;
}
537 
// Finds or creates a permanent (never-freed) symbol for the C string 'name'.
// An existing regular symbol is upgraded to permanent in place.
Symbol* SymbolTable::new_permanent_symbol(const char* name) {
  unsigned int hash = 0;  // set by lookup_only (out-parameter)
  int len = (int)strlen(name);
  Symbol* sym = SymbolTable::lookup_only(name, len, hash);
  if (sym == nullptr) {
    sym = do_add_if_needed(name, len, hash, /* is_permanent */ true);
  }
  if (!sym->is_permanent()) {
    sym->make_permanent();
    log_trace_symboltable_helper(sym, "Asked for a permanent symbol, but got a regular one");
  }
  return sym;
}
551 
552 struct SizeFunc : StackObj {
553   size_t operator()(Symbol* value) {
554     assert(value != nullptr, "expected valid value");
555     return (value)->size() * HeapWordSize;
556   };
557 };
558 
// Returns a snapshot of local-table statistics. 'ts' is a function-local
// static so rate information can be derived from the previous snapshot.
TableStatistics SymbolTable::get_table_statistics() {
  static TableStatistics ts;
  SizeFunc sz;
  ts = _local_table->statistics_get(Thread::current(), sz, ts);
  return ts;
}

// Prints statistics for the local table and, when present, the shared tables.
void SymbolTable::print_table_statistics(outputStream* st) {
  SizeFunc sz;
  _local_table->statistics_to(Thread::current(), sz, st, "SymbolTable");

  if (!_shared_table.empty()) {
    _shared_table.print_table_statistics(st, "Shared Symbol Table");
  }

  if (!_dynamic_shared_table.empty()) {
    _dynamic_shared_table.print_table_statistics(st, "Dynamic Shared Symbol Table");
  }
}
578 
579 // Verification
580 class VerifySymbols : StackObj {
581 public:
582   bool operator()(Symbol* value) {
583     guarantee(value != nullptr, "expected valid value");
584     Symbol* sym = value;
585     guarantee(sym->equals((const char*)sym->bytes(), sym->utf8_length()),
586               "symbol must be internally consistent");
587     return true;
588   };
589 };
590 
591 void SymbolTable::verify() {
592   Thread* thr = Thread::current();
593   VerifySymbols vs;
594   if (!_local_table->try_scan(thr, vs)) {
595     log_info(symboltable)("verify unavailable at this moment");
596   }
597 }
598 
599 static void print_symbol(outputStream* st, Symbol* sym) {
600   const char* utf8_string = (const char*)sym->bytes();
601   int utf8_length = sym->utf8_length();
602   st->print("%d %d: ", utf8_length, sym->refcount());
603   HashtableTextDump::put_utf8(st, utf8_string, utf8_length);
604   st->cr();
605 }
606 
607 // Dumping
608 class DumpSymbol : StackObj {
609   Thread* _thr;
610   outputStream* _st;
611 public:
612   DumpSymbol(Thread* thr, outputStream* st) : _thr(thr), _st(st) {}
613   bool operator()(Symbol* value) {
614     assert(value != nullptr, "expected valid value");
615     print_symbol(_st, value);
616     return true;
617   };
618 };
619 
620 class DumpSharedSymbol : StackObj {
621   outputStream* _st;
622 public:
623   DumpSharedSymbol(outputStream* st) : _st(st) {}
624   void do_value(Symbol* value) {
625     assert(value != nullptr, "value should point to a symbol");
626     print_symbol(_st, value);
627   };
628 };
629 
// Dumps the symbol table: statistics only, or (verbose) every symbol in the
// local and shared tables. The local-table dump is skipped if a concurrent
// resize prevents starting the scan.
void SymbolTable::dump(outputStream* st, bool verbose) {
  if (!verbose) {
    print_table_statistics(st);
  } else {
    Thread* thr = Thread::current();
    ResourceMark rm(thr);
    st->print_cr("VERSION: 1.1");
    DumpSymbol ds(thr, st);
    if (!_local_table->try_scan(thr, ds)) {
      log_info(symboltable)("dump unavailable at this moment");
    }
    if (!_shared_table.empty()) {
      st->print_cr("#----------------");
      st->print_cr("# Shared symbols:");
      st->print_cr("#----------------");
      DumpSharedSymbol dss(st);
      _shared_table.iterate(&dss);
    }
    if (!_dynamic_shared_table.empty()) {
      st->print_cr("#------------------------");
      st->print_cr("# Dynamic shared symbols:");
      st->print_cr("#------------------------");
      DumpSharedSymbol dss(st);
      _dynamic_shared_table.iterate(&dss);
    }
  }
}
657 
#if INCLUDE_CDS
// Copies the (already buffered) symbols into the compact-hashtable writer,
// keyed by the default (non-alternate) hash, and marks them permanent.
void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
                                           CompactHashtableWriter* writer) {
  ArchiveBuilder* builder = ArchiveBuilder::current();
  int len = symbols->length();
  for (int i = 0; i < len; i++) {
    Symbol* sym = ArchiveBuilder::get_buffered_symbol(symbols->at(i));
    unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length());
    assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
           "must not rehash during dumping");
    sym->set_permanent();
    // Stored as an offset into the archive buffer, not a raw pointer.
    writer->add(fixed_hash, builder->buffer_to_offset_u4((address)sym));
  }
}

// Upper bound on the archive space the shared symbol table will need.
size_t SymbolTable::estimate_size_for_archive() {
  if (_items_count > (size_t)max_jint) {
    fatal("Too many symbols to be archived: %zu", _items_count);
  }
  return CompactHashtableWriter::estimate_size(int(_items_count));
}

// Builds the dump-time shared table and writes it into the archive.
void SymbolTable::write_to_archive(GrowableArray<Symbol*>* symbols) {
  CompactHashtableWriter writer(int(_items_count), ArchiveBuilder::symbol_stats());
  copy_shared_symbol_table(symbols, &writer);
  _shared_table_for_dumping.reset();
  writer.dump(&_shared_table_for_dumping, "symbol");
}

// Serializes (when dumping) or restores (when reading) the header of the
// appropriate shared table: static vs. dynamic archive on read, the dump
// scratch table on write.
void SymbolTable::serialize_shared_table_header(SerializeClosure* soc,
                                                bool is_static_archive) {
  OffsetCompactHashtable<const char*, Symbol*, symbol_equals_compact_hashtable_entry> * table;
  if (soc->reading()) {
    if (is_static_archive) {
      table = &_shared_table;
    } else {
      table = &_dynamic_shared_table;
    }
  } else {
    table = &_shared_table_for_dumping;
  }

  table->serialize_header(soc);
}
#endif //INCLUDE_CDS
703 
704 // Concurrent work
705 void SymbolTable::grow(JavaThread* jt) {
706   SymbolTableHash::GrowTask gt(_local_table);
707   if (!gt.prepare(jt)) {
708     return;
709   }
710   log_trace(symboltable)("Started to grow");
711   {
712     TraceTime timer("Grow", TRACETIME_LOG(Debug, symboltable, perf));
713     while (gt.do_task(jt)) {
714       gt.pause(jt);
715       {
716         ThreadBlockInVM tbivm(jt);
717       }
718       gt.cont(jt);
719     }
720   }
721   gt.done(jt);
722   _current_size = table_size();
723   log_debug(symboltable)("Grown to size:" SIZE_FORMAT, _current_size);
724 }
725 
726 struct SymbolTableDoDelete : StackObj {
727   size_t _deleted;
728   SymbolTableDoDelete() : _deleted(0) {}
729   void operator()(Symbol* value) {
730     assert(value != nullptr, "expected valid value");
731     Symbol *sym = value;
732     assert(sym->refcount() == 0, "refcount");
733     _deleted++;
734   }
735 };
736 
737 struct SymbolTableDeleteCheck : StackObj {
738   size_t _processed;
739   SymbolTableDeleteCheck() : _processed(0) {}
740   bool operator()(Symbol* value) {
741     assert(value != nullptr, "expected valid value");
742     _processed++;
743     Symbol *sym = value;
744     return (sym->refcount() == 0);
745   }
746 };
747 
// Removes dead (refcount == 0) symbols from the local table in chunks,
// yielding between chunks; run on a JavaThread (ServiceThread).
void SymbolTable::clean_dead_entries(JavaThread* jt) {
  SymbolTableHash::BulkDeleteTask bdt(_local_table);
  if (!bdt.prepare(jt)) {
    // Another bulk operation is already in progress.
    return;
  }

  SymbolTableDeleteCheck stdc;
  SymbolTableDoDelete stdd;
  // Suspend native heap trimming while we free many small blocks.
  NativeHeapTrimmer::SuspendMark sm("symboltable");
  {
    TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
    while (bdt.do_task(jt, stdc, stdd)) {
      bdt.pause(jt);
      {
        // Empty blocked-in-VM scope: allows a safepoint between chunks.
        ThreadBlockInVM tbivm(jt);
      }
      bdt.cont(jt);
    }
    reset_has_items_to_clean();
    bdt.done(jt);
  }

  Atomic::add(&_symbols_counted, stdc._processed);

  log_debug(symboltable)("Cleaned " SIZE_FORMAT " of " SIZE_FORMAT,
                         stdd._deleted, stdc._processed);
}
775 
// Schedules concurrent cleanup/resize if warranted and not already pending.
void SymbolTable::check_concurrent_work() {
  if (has_work()) {
    return;
  }
  // We should clean/resize if we have
  // more items than preferred load factor or
  // more dead items than water mark.
  if (has_items_to_clean() || (get_load_factor() > PREF_AVG_LIST_LEN)) {
    log_debug(symboltable)("Concurrent work triggered, load factor: %f, items to clean: %s",
                           get_load_factor(), has_items_to_clean() ? "true" : "false");
    trigger_cleanup();
  }
}

// True when the table is overloaded and has not yet reached its maximum size.
bool SymbolTable::should_grow() {
  return get_load_factor() > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached();
}
793 
// Entry point for the ServiceThread: performs one round of deferred work
// (rehash, grow, or clean) and clears the _has_work flag when done.
void SymbolTable::do_concurrent_work(JavaThread* jt) {
  // Rehash if needed.  Rehashing goes to a safepoint but the rest of this
  // work is concurrent.
  if (needs_rehashing() && maybe_rehash_table()) {
    Atomic::release_store(&_has_work, false);
    return; // done, else grow
  }
  log_debug(symboltable, perf)("Concurrent work, live factor: %g", get_load_factor());
  // We prefer growing, since that also removes dead items
  if (should_grow()) {
    grow(jt);
  } else {
    clean_dead_entries(jt);
  }
  Atomic::release_store(&_has_work, false);
}
810 
811 // Called at VM_Operation safepoint
812 void SymbolTable::rehash_table() {
813   assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint");
814   // The ServiceThread initiates the rehashing so it is not resizing.
815   assert (_local_table->is_safepoint_safe(), "Should not be resizing now");
816 
817   _alt_hash_seed = AltHashing::compute_seed();
818 
819   // We use current size
820   size_t new_size = _local_table->get_size_log2(Thread::current());
821   SymbolTableHash* new_table = new SymbolTableHash(new_size, END_SIZE, REHASH_LEN, true);
822   // Use alt hash from now on
823   _alt_hash = true;
824   _local_table->rehash_nodes_to(Thread::current(), new_table);
825 
826   // free old table
827   delete _local_table;
828   _local_table = new_table;
829 
830   _rehashed = true;
831   _needs_rehashing = false;
832 }
833 
834 bool SymbolTable::maybe_rehash_table() {
835   log_debug(symboltable)("Table imbalanced, rehashing called.");
836 
837   // Grow instead of rehash.
838   if (should_grow()) {
839     log_debug(symboltable)("Choosing growing over rehashing.");
840     _needs_rehashing = false;
841     return false;
842   }
843 
844   // Already rehashed.
845   if (_rehashed) {
846     log_warning(symboltable)("Rehashing already done, still long lists.");
847     _needs_rehashing = false;
848     return false;
849   }
850 
851   VM_RehashSymbolTable op;
852   VMThread::execute(&op);
853   return true;
854 }
855 
856 //---------------------------------------------------------------------------
857 // Non-product code
858 
859 #ifndef PRODUCT
860 
861 class HistogramIterator : StackObj {
862 public:
863   static const size_t results_length = 100;
864   size_t counts[results_length];
865   size_t sizes[results_length];
866   size_t total_size;
867   size_t total_count;
868   size_t total_length;
869   size_t max_length;
870   size_t out_of_range_count;
871   size_t out_of_range_size;
872   HistogramIterator() : total_size(0), total_count(0), total_length(0),
873                         max_length(0), out_of_range_count(0), out_of_range_size(0) {
874     // initialize results to zero
875     for (size_t i = 0; i < results_length; i++) {
876       counts[i] = 0;
877       sizes[i] = 0;
878     }
879   }
880   bool operator()(Symbol* value) {
881     assert(value != nullptr, "expected valid value");
882     Symbol* sym = value;
883     size_t size = sym->size();
884     size_t len = sym->utf8_length();
885     if (len < results_length) {
886       counts[len]++;
887       sizes[len] += size;
888     } else {
889       out_of_range_count++;
890       out_of_range_size += size;
891     }
892     total_count++;
893     total_size += size;
894     total_length += len;
895     max_length = MAX2(max_length, len);
896 
897     return true;
898   };
899 };
900 
// Prints a symbol-length histogram plus table/arena totals to tty.
// Non-product only.
void SymbolTable::print_histogram() {
  HistogramIterator hi;
  _local_table->do_scan(Thread::current(), hi);
  tty->print_cr("Symbol Table Histogram:");
  tty->print_cr("  Total number of symbols  " SIZE_FORMAT_W(7), hi.total_count);
  tty->print_cr("  Total size in memory     " SIZE_FORMAT_W(7) "K", (hi.total_size * wordSize) / K);
  tty->print_cr("  Total counted            " SIZE_FORMAT_W(7), _symbols_counted);
  tty->print_cr("  Total removed            " SIZE_FORMAT_W(7), _symbols_removed);
  if (_symbols_counted > 0) {
    tty->print_cr("  Percent removed          %3.2f",
          ((double)_symbols_removed / (double)_symbols_counted) * 100);
  }
  tty->print_cr("  Reference counts         " SIZE_FORMAT_W(7), Symbol::_total_count);
  tty->print_cr("  Symbol arena used        " SIZE_FORMAT_W(7) "K", arena()->used() / K);
  tty->print_cr("  Symbol arena size        " SIZE_FORMAT_W(7) "K", arena()->size_in_bytes() / K);
  tty->print_cr("  Total symbol length      " SIZE_FORMAT_W(7), hi.total_length);
  tty->print_cr("  Maximum symbol length    " SIZE_FORMAT_W(7), hi.max_length);
  tty->print_cr("  Average symbol length    %7.2f", ((double)hi.total_length / (double)hi.total_count));
  tty->print_cr("  Symbol length histogram:");
  tty->print_cr("    %6s %10s %10s", "Length", "#Symbols", "Size");
  for (size_t i = 0; i < hi.results_length; i++) {
    if (hi.counts[i] > 0) {
      tty->print_cr("    " SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K",
                    i, hi.counts[i], (hi.sizes[i] * wordSize) / K);
    }
  }
  tty->print_cr("  >=" SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K\n",
                hi.results_length, hi.out_of_range_count, (hi.out_of_range_size*wordSize) / K);
}
930 #endif // PRODUCT
931 
932 // Utility for dumping symbols
// Utility for dumping symbols
SymboltableDCmd::SymboltableDCmd(outputStream* output, bool heap) :
                                 DCmdWithParser(output, heap),
  _verbose("-verbose", "Dump the content of each symbol in the table",
           "BOOLEAN", false, "false") {
  _dcmdparser.add_dcmd_option(&_verbose);
}

// Runs the dump as a VM operation (safepoint) so the table is stable.
void SymboltableDCmd::execute(DCmdSource source, TRAPS) {
  VM_DumpHashtable dumper(output(), VM_DumpHashtable::DumpSymbols,
                         _verbose.value());
  VMThread::execute(&dumper);
}