/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/dynamicArchive.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "services/diagnosticCommand.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/utf8.hpp"

// We used to not resize at all, so to match previous startup behavior, be
// conservative and do not make the preferred list length too short before
// deciding to resize.
const double PREF_AVG_LIST_LEN = 8.0;
// 2^24 is the maximum size, like StringTable.
const size_t END_SIZE = 24;
// If a chain gets to 100 entries, something might be wrong.
const size_t REHASH_LEN = 100;

const size_t ON_STACK_BUFFER_LENGTH = 128;

// --------------------------------------------------------------------------

inline bool symbol_equals_compact_hashtable_entry(Symbol* value, const char* key, int len) {
  return value->equals(key, len);
}

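// The two read-only symbol tables mapped in from the CDS archives: one from the
// static (base) archive and, if a dynamic archive is mapped, one from the dynamic
// archive. Symbols stored here are permanent and are always looked up with the
// original hash code, never the alternate one.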
static OffsetCompactHashtable<
  const char*, Symbol*,
  symbol_equals_compact_hashtable_entry
> _shared_table;

static OffsetCompactHashtable<
  const char*, Symbol*,
  symbol_equals_compact_hashtable_entry
> _dynamic_shared_table;

// --------------------------------------------------------------------------

typedef ConcurrentHashTable<SymbolTableConfig, mtSymbol> SymbolTableHash;
static SymbolTableHash* _local_table = nullptr;

volatile bool SymbolTable::_has_work = false;
volatile bool SymbolTable::_needs_rehashing = false;

// For statistics
static size_t _symbols_removed = 0;
static size_t _symbols_counted = 0;
static size_t _current_size = 0;

static volatile size_t _items_count = 0;
static volatile bool   _has_items_to_clean = false;

static volatile bool _alt_hash = false;

#ifdef USE_LIBRARY_BASED_TLS_ONLY
static volatile bool _lookup_shared_first = false;
#else
// "_lookup_shared_first" can get highly contended with many cores if multiple threads
// are updating "lookup success history" in a global shared variable. If built-in TLS is available, use it.
static THREAD_LOCAL bool _lookup_shared_first = false;
#endif

// Static arena for symbols that are not deallocated
Arena* SymbolTable::_arena = nullptr;

static bool _rehashed = false;
static uint64_t _alt_hash_seed = 0;

static inline void log_trace_symboltable_helper(Symbol* sym, const char* msg) {
#ifndef PRODUCT
  ResourceMark rm;
  log_trace(symboltable)("%s [%s]", msg, sym->as_quoted_ascii());
#endif // PRODUCT
}

// Pick hashing algorithm.
static unsigned int hash_symbol(const char* s, int len, bool useAlt) {
  return useAlt ?
  AltHashing::halfsiphash_32(_alt_hash_seed, (const uint8_t*)s, len) :
  java_lang_String::hash_code((const jbyte*)s, len);
}

#if INCLUDE_CDS
static unsigned int hash_shared_symbol(const char* s, int len) {
  return java_lang_String::hash_code((const jbyte*)s, len);
}
#endif

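// Configuration policy for the ConcurrentHashTable backing the symbol table. It
// supplies the hash function for stored Symbols (dead symbols hash to 0) and the
// allocation/deallocation of table nodes: C-heap for regular symbols, the global
// arena for permanent symbols, or the CDS symbol space when dumping a static archive.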
class SymbolTableConfig : public AllStatic {

public:
  typedef Symbol Value;  // value of the Node in the hashtable

  static uintx get_hash(Value const& value, bool* is_dead) {
    *is_dead = (value.refcount() == 0);
    if (*is_dead) {
      return 0;
    } else {
      return hash_symbol((const char*)value.bytes(), value.utf8_length(), _alt_hash);
    }
  }
  // We use default allocation/deallocation, but the nodes are counted.
  static void* allocate_node(void* context, size_t size, Value const& value) {
    SymbolTable::item_added();
    return allocate_node_impl(size, value);
  }
  static void free_node(void* context, void* memory, Value & value) {
    // We get here because either #1 a thread lost a race to insert a newly created Symbol,
    // or #2 we are cleaning up an unused symbol.
    // If #1, the symbol is either permanent or a regular newly created one (refcount==1).
    // If #2, the symbol is dead (refcount==0).
    assert(value.is_permanent() || (value.refcount() == 1) || (value.refcount() == 0),
           "refcount %d", value.refcount());
#if INCLUDE_CDS
    if (CDSConfig::is_dumping_static_archive()) {
      // We have allocated with MetaspaceShared::symbol_space_alloc(). No deallocation is needed.
      // Unreferenced Symbols will not be copied into the archive.
      return;
    }
#endif
    if (value.refcount() == 1) {
      value.decrement_refcount();
      assert(value.refcount() == 0, "expected dead symbol");
    }
    if (value.refcount() != PERM_REFCOUNT) {
      FreeHeap(memory);
    } else {
      MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
      // Deleting a permanent symbol should not occur very often (insert race condition),
      // so log it.
      log_trace_symboltable_helper(&value, "Freeing permanent symbol");
      size_t alloc_size = _local_table->get_node_size() + value.byte_size() + value.effective_length();
      if (!SymbolTable::arena()->Afree(memory, alloc_size)) {
        log_trace_symboltable_helper(&value, "Leaked permanent symbol");
      }
    }
    SymbolTable::item_removed();
  }

private:
  static void* allocate_node_impl(size_t size, Value const& value) {
    size_t alloc_size = size + value.byte_size() + value.effective_length();
#if INCLUDE_CDS
    if (CDSConfig::is_dumping_static_archive()) {
      MutexLocker ml(DumpRegion_lock, Mutex::_no_safepoint_check_flag);
      // To get deterministic output from -Xshare:dump, we ensure that Symbols are allocated in
      // increasing addresses. When the symbols are copied into the archive, we preserve their
      // relative address order (sorted, see ArchiveBuilder::gather_klasses_and_symbols).
      //
      // We cannot use an arena because arena chunks are allocated by the OS. As a result, for example,
      // the archived symbol of "java/lang/Object" may sometimes be at a lower address than "java/lang/String",
      // and sometimes at a higher one. This would cause non-deterministic contents in the archive.
      DEBUG_ONLY(static void* last = nullptr);
      void* p = (void*)MetaspaceShared::symbol_space_alloc(alloc_size);
      assert(p > last, "must increase monotonically");
      DEBUG_ONLY(last = p);
      return p;
    }
#endif
    if (value.refcount() != PERM_REFCOUNT) {
      return AllocateHeap(alloc_size, mtSymbol);
    } else {
      // Allocate to the global arena.
      MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
      return SymbolTable::arena()->Amalloc(alloc_size);
    }
  }
};

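// Create the concurrent hash table (sized from SymbolTableSize, rounded up to a
// power of two) and the arena that backs permanent symbols.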
void SymbolTable::create_table() {
  size_t start_size_log_2 = ceil_log2(SymbolTableSize);
  _current_size = ((size_t)1) << start_size_log_2;
  log_trace(symboltable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
                         _current_size, start_size_log_2);
  _local_table = new SymbolTableHash(start_size_log_2, END_SIZE, REHASH_LEN, true);

  // Initialize the arena for global symbols; the size passed in depends on CDS.
  if (symbol_alloc_arena_size == 0) {
    _arena = new (mtSymbol) Arena(mtSymbol);
  } else {
    _arena = new (mtSymbol) Arena(mtSymbol, Arena::Tag::tag_other, symbol_alloc_arena_size);
  }
}

void SymbolTable::reset_has_items_to_clean() { Atomic::store(&_has_items_to_clean, false); }
void SymbolTable::mark_has_items_to_clean()  { Atomic::store(&_has_items_to_clean, true); }
bool SymbolTable::has_items_to_clean()       { return Atomic::load(&_has_items_to_clean); }

void SymbolTable::item_added() {
  Atomic::inc(&_items_count);
}

void SymbolTable::item_removed() {
  Atomic::inc(&(_symbols_removed));
  Atomic::dec(&_items_count);
}

double SymbolTable::get_load_factor() {
  return (double)_items_count/(double)_current_size;
}

size_t SymbolTable::table_size() {
  return ((size_t)1) << _local_table->get_size_log2(Thread::current());
}

bool SymbolTable::has_work() { return Atomic::load_acquire(&_has_work); }

void SymbolTable::trigger_cleanup() {
  // Avoid churn on ServiceThread
  if (!has_work()) {
    MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
    _has_work = true;
    Service_lock->notify_all();
  }
}

class SymbolsDo : StackObj {
  SymbolClosure *_cl;
public:
  SymbolsDo(SymbolClosure *cl) : _cl(cl) {}
  bool operator()(Symbol* value) {
    assert(value != nullptr, "expected valid value");
    _cl->do_symbol(&value);
    return true;
  };
};

class SharedSymbolIterator {
  SymbolClosure* _symbol_closure;
public:
  SharedSymbolIterator(SymbolClosure* f) : _symbol_closure(f) {}
  void do_value(Symbol* symbol) {
    _symbol_closure->do_symbol(&symbol);
  }
};

// Call function for all symbols in the symbol table.
void SymbolTable::symbols_do(SymbolClosure *cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
  // all symbols from shared table
  SharedSymbolIterator iter(cl);
  _shared_table.iterate(&iter);
  _dynamic_shared_table.iterate(&iter);

  // all symbols from the dynamic table
  SymbolsDo sd(cl);
  _local_table->do_safepoint_scan(sd);
}

// Call function for all symbols in shared table. Used by -XX:+PrintSharedArchiveAndExit
void SymbolTable::shared_symbols_do(SymbolClosure *cl) {
  SharedSymbolIterator iter(cl);
  _shared_table.iterate(&iter);
  _dynamic_shared_table.iterate(&iter);
}

Symbol* SymbolTable::lookup_dynamic(const char* name,
                                    int len, unsigned int hash) {
  Symbol* sym = do_lookup(name, len, hash);
  assert((sym == nullptr) || sym->refcount() != 0, "refcount must not be zero");
  return sym;
}

#if INCLUDE_CDS
Symbol* SymbolTable::lookup_shared(const char* name,
                                   int len, unsigned int hash) {
  Symbol* sym = nullptr;
  if (!_shared_table.empty()) {
    if (_alt_hash) {
      // The hash parameter may have been computed with the alternate hashing algorithm,
      // but the shared table always uses the original hash code.
      hash = hash_shared_symbol(name, len);
    }
    sym = _shared_table.lookup(name, hash, len);
    if (sym == nullptr && DynamicArchive::is_mapped()) {
      sym = _dynamic_shared_table.lookup(name, hash, len);
    }
  }
  return sym;
}
#endif

Symbol* SymbolTable::lookup_common(const char* name,
                                   int len, unsigned int hash) {
  Symbol* sym;
  if (_lookup_shared_first) {
    sym = lookup_shared(name, len, hash);
    if (sym == nullptr) {
      _lookup_shared_first = false;
      sym = lookup_dynamic(name, len, hash);
    }
  } else {
    sym = lookup_dynamic(name, len, hash);
    if (sym == nullptr) {
      sym = lookup_shared(name, len, hash);
      if (sym != nullptr) {
        _lookup_shared_first = true;
      }
    }
  }
  return sym;
}

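// Interning entry point: return the canonical Symbol for a UTF-8 string, creating it
// with refcount 1 if it is not already present. The returned symbol's refcount has been
// incremented for the caller. A minimal usage sketch (hypothetical caller; non-permanent
// symbols must eventually be released by whoever holds them):
//
//   Symbol* sym = SymbolTable::new_symbol("java/lang/Object", 16);
//   // ... use sym ...
//   sym->decrement_refcount();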
Symbol* SymbolTable::new_symbol(const char* name, int len) {
  assert(len <= Symbol::max_length(), "sanity");
  unsigned int hash = hash_symbol(name, len, _alt_hash);
  Symbol* sym = lookup_common(name, len, hash);
  if (sym == nullptr) {
    sym = do_add_if_needed(name, len, hash, /* is_permanent */ false);
  }
  assert(sym->refcount() != 0, "lookup should have incremented the count");
  assert(sym->equals(name, len), "symbol must be properly initialized");
  return sym;
}

Symbol* SymbolTable::new_symbol(const Symbol* sym, int begin, int end) {
  assert(begin <= end && end <= sym->utf8_length(), "just checking");
  assert(sym->refcount() != 0, "require a valid symbol");
  const char* name = (const char*)sym->base() + begin;
  int len = end - begin;
  assert(len <= Symbol::max_length(), "sanity");
  unsigned int hash = hash_symbol(name, len, _alt_hash);
  Symbol* found = lookup_common(name, len, hash);
  if (found == nullptr) {
    found = do_add_if_needed(name, len, hash, /* is_permanent */ false);
  }
  return found;
}

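// Lookup closure passed to the ConcurrentHashTable: it carries the precomputed hash
// and the UTF-8 key, and on a successful comparison takes a refcount on the matching
// Symbol (see the note on equals() below).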
class SymbolTableLookup : StackObj {
private:
  uintx _hash;
  int _len;
  const char* _str;
public:
  SymbolTableLookup(const char* key, int len, uintx hash)
  : _hash(hash), _len(len), _str(key) {}
  uintx get_hash() const {
    return _hash;
  }
  // Note: When equals() returns "true", the symbol's refcount is incremented. This is
  // needed to ensure that the symbol is kept alive before equals() returns to the caller,
  // so that another thread cannot clean the symbol up concurrently. The caller is
  // responsible for decrementing the refcount, when the symbol is no longer needed.
  bool equals(Symbol* value) {
    assert(value != nullptr, "expected valid value");
    Symbol *sym = value;
    if (sym->equals(_str, _len)) {
      if (sym->try_increment_refcount()) {
        // something is referencing this symbol now.
        return true;
      } else {
        assert(sym->refcount() == 0, "expected dead symbol");
        return false;
      }
    } else {
      return false;
    }
  }
  bool is_dead(Symbol* value) {
    return value->refcount() == 0;
  }
};

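// Found-callback passed to ConcurrentHashTable::get()/insert(): records the matching
// Symbol* so the caller can retrieve it afterwards.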
class SymbolTableGet : public StackObj {
  Symbol* _return;
public:
  SymbolTableGet() : _return(nullptr) {}
  void operator()(Symbol* value) {
    assert(value != nullptr, "expected valid value");
    _return = value;
  }
  Symbol* get_res_sym() const {
    return _return;
  }
};

void SymbolTable::update_needs_rehash(bool rehash) {
  if (rehash) {
    _needs_rehashing = true;
    trigger_cleanup();
  }
}

Symbol* SymbolTable::do_lookup(const char* name, int len, uintx hash) {
  Thread* thread = Thread::current();
  SymbolTableLookup lookup(name, len, hash);
  SymbolTableGet stg;
  bool rehash_warning = false;
  _local_table->get(thread, lookup, stg, &rehash_warning);
  update_needs_rehash(rehash_warning);
  Symbol* sym = stg.get_res_sym();
  assert((sym == nullptr) || sym->refcount() != 0, "found dead symbol");
  return sym;
}

Symbol* SymbolTable::lookup_only(const char* name, int len, unsigned int& hash) {
  hash = hash_symbol(name, len, _alt_hash);
  return lookup_common(name, len, hash);
}

// Suggestion: Push unicode-based lookup all the way into the hashing
// and probing logic, so there is no need for convert_to_utf8 until
// an actual new Symbol* is created.
Symbol* SymbolTable::new_symbol(const jchar* name, int utf16_length) {
  int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
  char stack_buf[ON_STACK_BUFFER_LENGTH];
  if (utf8_length < (int) sizeof(stack_buf)) {
    char* chars = stack_buf;
    UNICODE::convert_to_utf8(name, utf16_length, chars);
    return new_symbol(chars, utf8_length);
  } else {
    ResourceMark rm;
    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
    UNICODE::convert_to_utf8(name, utf16_length, chars);
    return new_symbol(chars, utf8_length);
  }
}

Symbol* SymbolTable::lookup_only_unicode(const jchar* name, int utf16_length,
                                         unsigned int& hash) {
  int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
  char stack_buf[ON_STACK_BUFFER_LENGTH];
  if (utf8_length < (int) sizeof(stack_buf)) {
    char* chars = stack_buf;
    UNICODE::convert_to_utf8(name, utf16_length, chars);
    return lookup_only(chars, utf8_length, hash);
  } else {
    ResourceMark rm;
    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
    UNICODE::convert_to_utf8(name, utf16_length, chars);
    return lookup_only(chars, utf8_length, hash);
  }
}

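// Bulk-intern the symbols parsed from a class file's constant pool and store them at
// their constant-pool indices. The caller has already computed the hashes and verified
// that none of the names are present in the shared tables.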
void SymbolTable::new_symbols(ClassLoaderData* loader_data, const constantPoolHandle& cp,
                              int names_count, const char** names, int* lengths,
                              int* cp_indices, unsigned int* hashValues) {
  // Note that is_permanent will be false for non-strong hidden classes,
  // even if their loader is the boot loader, because they will have a different cld.
  bool is_permanent = loader_data->is_the_null_class_loader_data();
  for (int i = 0; i < names_count; i++) {
    const char *name = names[i];
    int len = lengths[i];
    unsigned int hash = hashValues[i];
    assert(lookup_shared(name, len, hash) == nullptr, "must have checked already");
    Symbol* sym = do_add_if_needed(name, len, hash, is_permanent);
    assert(sym->refcount() != 0, "lookup should have incremented the count");
    cp->symbol_at_put(cp_indices[i], sym);
  }
}

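// Insert the symbol if it is not already present. The insert may race with other
// threads, so on failure (or if the freshly inserted node is immediately cleaned)
// we retry the get/insert sequence until a live symbol is obtained. The returned
// symbol carries a refcount held by the caller (or PERM_REFCOUNT for permanent ones).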
Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, bool is_permanent) {
  SymbolTableLookup lookup(name, len, hash);
  SymbolTableGet stg;
  bool clean_hint = false;
  bool rehash_warning = false;
  Thread* current = Thread::current();
  Symbol* sym;

  ResourceMark rm(current);
  const int alloc_size = Symbol::byte_size(len);
  u1* u1_buf = NEW_RESOURCE_ARRAY_IN_THREAD(current, u1, alloc_size);
  Symbol* tmp = ::new ((void*)u1_buf) Symbol((const u1*)name, len,
                                             (is_permanent || CDSConfig::is_dumping_static_archive()) ? PERM_REFCOUNT : 1);

  do {
    if (_local_table->insert(current, lookup, *tmp, &rehash_warning, &clean_hint)) {
      if (_local_table->get(current, lookup, stg, &rehash_warning)) {
        sym = stg.get_res_sym();
        // The get adds one to the ref count, but we inserted with our ref already included.
        // Therefore decrement by one.
        if (sym->refcount() != PERM_REFCOUNT) {
          sym->decrement_refcount();
        }
        break;
      }
    }

    // In case another thread did a concurrent add, return the value already in the table.
    // This could fail if the symbol got deleted concurrently, so loop back until success.
    if (_local_table->get(current, lookup, stg, &rehash_warning)) {
      // The lookup added a refcount, which is ours.
      sym = stg.get_res_sym();
      break;
    }
  } while(true);

  update_needs_rehash(rehash_warning);

  if (clean_hint) {
    mark_has_items_to_clean();
    check_concurrent_work();
  }

  assert((sym == nullptr) || sym->refcount() != 0, "found dead symbol");
  return sym;
}


Symbol* SymbolTable::new_permanent_symbol(const char* name) {
  unsigned int hash = 0;
  int len = (int)strlen(name);
  Symbol* sym = SymbolTable::lookup_only(name, len, hash);
  if (sym == nullptr) {
    sym = do_add_if_needed(name, len, hash, /* is_permanent */ true);
  }
  if (!sym->is_permanent()) {
    sym->make_permanent();
    log_trace_symboltable_helper(sym, "Asked for a permanent symbol, but got a regular one");
  }
  return sym;
}

struct SizeFunc : StackObj {
  size_t operator()(Symbol* value) {
    assert(value != nullptr, "expected valid value");
    return (value)->size() * HeapWordSize;
  };
};

TableStatistics SymbolTable::get_table_statistics() {
  static TableStatistics ts;
  SizeFunc sz;
  ts = _local_table->statistics_get(Thread::current(), sz, ts);
  return ts;
}

void SymbolTable::print_table_statistics(outputStream* st) {
  SizeFunc sz;
  _local_table->statistics_to(Thread::current(), sz, st, "SymbolTable");

  if (!_shared_table.empty()) {
    _shared_table.print_table_statistics(st, "Shared Symbol Table");
  }

  if (!_dynamic_shared_table.empty()) {
    _dynamic_shared_table.print_table_statistics(st, "Dynamic Shared Symbol Table");
  }
}

// Verification
class VerifySymbols : StackObj {
public:
  bool operator()(Symbol* value) {
    guarantee(value != nullptr, "expected valid value");
    Symbol* sym = value;
    guarantee(sym->equals((const char*)sym->bytes(), sym->utf8_length()),
              "symbol must be internally consistent");
    return true;
  };
};

void SymbolTable::verify() {
  Thread* thr = Thread::current();
  VerifySymbols vs;
  if (!_local_table->try_scan(thr, vs)) {
    log_info(symboltable)("verify unavailable at this moment");
  }
}

static void print_symbol(outputStream* st, Symbol* sym) {
  const char* utf8_string = (const char*)sym->bytes();
  int utf8_length = sym->utf8_length();
  st->print("%d %d: ", utf8_length, sym->refcount());
  HashtableTextDump::put_utf8(st, utf8_string, utf8_length);
  st->cr();
}

// Dumping
class DumpSymbol : StackObj {
  Thread* _thr;
  outputStream* _st;
public:
  DumpSymbol(Thread* thr, outputStream* st) : _thr(thr), _st(st) {}
  bool operator()(Symbol* value) {
    assert(value != nullptr, "expected valid value");
    print_symbol(_st, value);
    return true;
  };
};

class DumpSharedSymbol : StackObj {
  outputStream* _st;
public:
  DumpSharedSymbol(outputStream* st) : _st(st) {}
  void do_value(Symbol* value) {
    assert(value != nullptr, "value should point to a symbol");
    print_symbol(_st, value);
  };
};

void SymbolTable::dump(outputStream* st, bool verbose) {
  if (!verbose) {
    print_table_statistics(st);
  } else {
    Thread* thr = Thread::current();
    ResourceMark rm(thr);
    st->print_cr("VERSION: 1.1");
    DumpSymbol ds(thr, st);
    if (!_local_table->try_scan(thr, ds)) {
      log_info(symboltable)("dump unavailable at this moment");
    }
    if (!_shared_table.empty()) {
      st->print_cr("#----------------");
      st->print_cr("# Shared symbols:");
      st->print_cr("#----------------");
      DumpSharedSymbol dss(st);
      _shared_table.iterate(&dss);
    }
    if (!_dynamic_shared_table.empty()) {
      st->print_cr("#------------------------");
      st->print_cr("# Dynamic shared symbols:");
      st->print_cr("#------------------------");
      DumpSharedSymbol dss(st);
      _dynamic_shared_table.iterate(&dss);
    }
  }
}

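// CDS support: at dump time the live symbols are written into a CompactHashtable.
// The rough flow is estimate_size_for_archive() to size the writer, write_to_archive()
// with the sorted symbol list collected by ArchiveBuilder, and
// serialize_shared_table_header() to record the table header in the archive.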
#if INCLUDE_CDS
void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
                                           CompactHashtableWriter* writer) {
  ArchiveBuilder* builder = ArchiveBuilder::current();
  int len = symbols->length();
  for (int i = 0; i < len; i++) {
    Symbol* sym = ArchiveBuilder::get_buffered_symbol(symbols->at(i));
    unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length());
    assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
           "must not rehash during dumping");
    sym->set_permanent();
    writer->add(fixed_hash, builder->buffer_to_offset_u4((address)sym));
  }
}

size_t SymbolTable::estimate_size_for_archive() {
  if (_items_count > (size_t)max_jint) {
    fatal("Too many symbols to be archived: %zu", _items_count);
  }
  return CompactHashtableWriter::estimate_size(int(_items_count));
}

void SymbolTable::write_to_archive(GrowableArray<Symbol*>* symbols) {
  CompactHashtableWriter writer(int(_items_count), ArchiveBuilder::symbol_stats());
  copy_shared_symbol_table(symbols, &writer);
  if (CDSConfig::is_dumping_static_archive()) {
    _shared_table.reset();
    writer.dump(&_shared_table, "symbol");
  } else {
    assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
    _dynamic_shared_table.reset();
    writer.dump(&_dynamic_shared_table, "symbol");
  }
}

void SymbolTable::serialize_shared_table_header(SerializeClosure* soc,
                                                bool is_static_archive) {
  OffsetCompactHashtable<const char*, Symbol*, symbol_equals_compact_hashtable_entry> * table;
  if (is_static_archive) {
    table = &_shared_table;
  } else {
    table = &_dynamic_shared_table;
  }
  table->serialize_header(soc);
  if (soc->writing()) {
    // Sanity. Make sure we don't use the shared table at dump time
    table->reset();
  }
}
#endif //INCLUDE_CDS

// Concurrent work
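// Grow the table by one size step. The GrowTask splits the work into chunks; between
// chunks the thread briefly enters ThreadBlockInVM so it can honor safepoint requests
// while the growth is in progress.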
void SymbolTable::grow(JavaThread* jt) {
  SymbolTableHash::GrowTask gt(_local_table);
  if (!gt.prepare(jt)) {
    return;
  }
  log_trace(symboltable)("Started to grow");
  {
    TraceTime timer("Grow", TRACETIME_LOG(Debug, symboltable, perf));
    while (gt.do_task(jt)) {
      gt.pause(jt);
      {
        ThreadBlockInVM tbivm(jt);
      }
      gt.cont(jt);
    }
  }
  gt.done(jt);
  _current_size = table_size();
  log_debug(symboltable)("Grown to size:" SIZE_FORMAT, _current_size);
}

struct SymbolTableDoDelete : StackObj {
  size_t _deleted;
  SymbolTableDoDelete() : _deleted(0) {}
  void operator()(Symbol* value) {
    assert(value != nullptr, "expected valid value");
    Symbol *sym = value;
    assert(sym->refcount() == 0, "refcount");
    _deleted++;
  }
};

struct SymbolTableDeleteCheck : StackObj {
  size_t _processed;
  SymbolTableDeleteCheck() : _processed(0) {}
  bool operator()(Symbol* value) {
    assert(value != nullptr, "expected valid value");
    _processed++;
    Symbol *sym = value;
    return (sym->refcount() == 0);
  }
};

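// Remove entries whose refcount has dropped to zero. The check functor selects dead
// symbols, the delete functor counts them, and the node memory itself is released by
// SymbolTableConfig::free_node().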
void SymbolTable::clean_dead_entries(JavaThread* jt) {
  SymbolTableHash::BulkDeleteTask bdt(_local_table);
  if (!bdt.prepare(jt)) {
    return;
  }

  SymbolTableDeleteCheck stdc;
  SymbolTableDoDelete stdd;
  NativeHeapTrimmer::SuspendMark sm("symboltable");
  {
    TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
    while (bdt.do_task(jt, stdc, stdd)) {
      bdt.pause(jt);
      {
        ThreadBlockInVM tbivm(jt);
      }
      bdt.cont(jt);
    }
    reset_has_items_to_clean();
    bdt.done(jt);
  }

  Atomic::add(&_symbols_counted, stdc._processed);

  log_debug(symboltable)("Cleaned " SIZE_FORMAT " of " SIZE_FORMAT,
                         stdd._deleted, stdc._processed);
}

void SymbolTable::check_concurrent_work() {
  if (has_work()) {
    return;
  }
  // We should clean/resize if we have
  // more items than the preferred load factor allows,
  // or more dead items than the watermark.
  if (has_items_to_clean() || (get_load_factor() > PREF_AVG_LIST_LEN)) {
    log_debug(symboltable)("Concurrent work triggered, load factor: %f, items to clean: %s",
                           get_load_factor(), has_items_to_clean() ? "true" : "false");
    trigger_cleanup();
  }
}

bool SymbolTable::should_grow() {
  return get_load_factor() > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached();
}

void SymbolTable::do_concurrent_work(JavaThread* jt) {
  // Rehash if needed.  Rehashing goes to a safepoint but the rest of this
  // work is concurrent.
  if (needs_rehashing() && maybe_rehash_table()) {
    Atomic::release_store(&_has_work, false);
    return; // done, else grow
  }
  log_debug(symboltable, perf)("Concurrent work, live factor: %g", get_load_factor());
  // We prefer growing, since that also removes dead items.
  if (should_grow()) {
    grow(jt);
  } else {
    clean_dead_entries(jt);
  }
  Atomic::release_store(&_has_work, false);
}

// Called at a VM_Operation safepoint.
void SymbolTable::rehash_table() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint");
  // The ServiceThread initiates the rehashing, so the table is not being resized concurrently.
  assert (_local_table->is_safepoint_safe(), "Should not be resizing now");

  _alt_hash_seed = AltHashing::compute_seed();

  // We use the current size.
  size_t new_size = _local_table->get_size_log2(Thread::current());
  SymbolTableHash* new_table = new SymbolTableHash(new_size, END_SIZE, REHASH_LEN, true);
  // Use alt hash from now on.
  _alt_hash = true;
  _local_table->rehash_nodes_to(Thread::current(), new_table);

  // Free the old table.
  delete _local_table;
  _local_table = new_table;

  _rehashed = true;
  _needs_rehashing = false;
}

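// Decide whether to rehash: growing is preferred since it also removes dead items, and
// rehashing with the alternate hash is only attempted once. If rehashing is chosen, it
// is executed in a VM_RehashSymbolTable safepoint operation.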
bool SymbolTable::maybe_rehash_table() {
  log_debug(symboltable)("Table imbalanced, rehashing called.");

  // Grow instead of rehash.
  if (should_grow()) {
    log_debug(symboltable)("Choosing growing over rehashing.");
    _needs_rehashing = false;
    return false;
  }

  // Already rehashed.
  if (_rehashed) {
    log_warning(symboltable)("Rehashing already done, still long lists.");
    _needs_rehashing = false;
    return false;
  }

  VM_RehashSymbolTable op;
  VMThread::execute(&op);
  return true;
}

//---------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

class HistogramIterator : StackObj {
public:
  static const size_t results_length = 100;
  size_t counts[results_length];
  size_t sizes[results_length];
  size_t total_size;
  size_t total_count;
  size_t total_length;
  size_t max_length;
  size_t out_of_range_count;
  size_t out_of_range_size;
  HistogramIterator() : total_size(0), total_count(0), total_length(0),
                        max_length(0), out_of_range_count(0), out_of_range_size(0) {
    // initialize results to zero
    for (size_t i = 0; i < results_length; i++) {
      counts[i] = 0;
      sizes[i] = 0;
    }
  }
  bool operator()(Symbol* value) {
    assert(value != nullptr, "expected valid value");
    Symbol* sym = value;
    size_t size = sym->size();
    size_t len = sym->utf8_length();
    if (len < results_length) {
      counts[len]++;
      sizes[len] += size;
    } else {
      out_of_range_count++;
      out_of_range_size += size;
    }
    total_count++;
    total_size += size;
    total_length += len;
    max_length = MAX2(max_length, len);

    return true;
  };
};

void SymbolTable::print_histogram() {
  HistogramIterator hi;
  _local_table->do_scan(Thread::current(), hi);
  tty->print_cr("Symbol Table Histogram:");
  tty->print_cr("  Total number of symbols  " SIZE_FORMAT_W(7), hi.total_count);
  tty->print_cr("  Total size in memory     " SIZE_FORMAT_W(7) "K", (hi.total_size * wordSize) / K);
  tty->print_cr("  Total counted            " SIZE_FORMAT_W(7), _symbols_counted);
  tty->print_cr("  Total removed            " SIZE_FORMAT_W(7), _symbols_removed);
  if (_symbols_counted > 0) {
    tty->print_cr("  Percent removed          %3.2f",
          ((double)_symbols_removed / (double)_symbols_counted) * 100);
  }
  tty->print_cr("  Reference counts         " SIZE_FORMAT_W(7), Symbol::_total_count);
  tty->print_cr("  Symbol arena used        " SIZE_FORMAT_W(7) "K", arena()->used() / K);
  tty->print_cr("  Symbol arena size        " SIZE_FORMAT_W(7) "K", arena()->size_in_bytes() / K);
  tty->print_cr("  Total symbol length      " SIZE_FORMAT_W(7), hi.total_length);
  tty->print_cr("  Maximum symbol length    " SIZE_FORMAT_W(7), hi.max_length);
  tty->print_cr("  Average symbol length    %7.2f", ((double)hi.total_length / (double)hi.total_count));
  tty->print_cr("  Symbol length histogram:");
  tty->print_cr("    %6s %10s %10s", "Length", "#Symbols", "Size");
  for (size_t i = 0; i < hi.results_length; i++) {
    if (hi.counts[i] > 0) {
      tty->print_cr("    " SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K",
                    i, hi.counts[i], (hi.sizes[i] * wordSize) / K);
    }
  }
  tty->print_cr("  >=" SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K\n",
                hi.results_length, hi.out_of_range_count, (hi.out_of_range_size*wordSize) / K);
}
#endif // PRODUCT

// Utility for dumping symbols
SymboltableDCmd::SymboltableDCmd(outputStream* output, bool heap) :
                                 DCmdWithParser(output, heap),
  _verbose("-verbose", "Dump the content of each symbol in the table",
           "BOOLEAN", false, "false") {
  _dcmdparser.add_dcmd_option(&_verbose);
}

void SymboltableDCmd::execute(DCmdSource source, TRAPS) {
  VM_DumpHashtable dumper(output(), VM_DumpHashtable::DumpSymbols,
                         _verbose.value());
  VMThread::execute(&dumper);
}