< prev index next >

src/hotspot/share/memory/metaspaceShared.cpp

Print this page




  73 #if INCLUDE_G1GC
  74 #include "gc/g1/g1CollectedHeap.hpp"
  75 #endif
  76 
// Definitions of MetaspaceShared's static state (declared in the header).
ReservedSpace MetaspaceShared::_shared_rs;   // reserved address range holding all dump regions
VirtualSpace MetaspaceShared::_shared_vs;    // committed-memory view over _shared_rs
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;    // NOTE(review): no explicit initializer — relies on static zero-init
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
size_t MetaspaceShared::_core_spaces_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
  87 
  88 // The CDS archive is divided into the following regions:
  89 //     mc  - misc code (the method entry trampolines)
  90 //     rw  - read-write metadata
  91 //     ro  - read-only metadata and read-only tables
  92 //     md  - misc data (the c++ vtables)

  93 //
  94 //     ca0 - closed archive heap space #0
  95 //     ca1 - closed archive heap space #1 (may be empty)
  96 //     oa0 - open archive heap space #0
  97 //     oa1 - open archive heap space #1 (may be empty)
  98 //
  99 // The mc, rw, ro, and md regions are linearly allocated, starting from
 100 // SharedBaseAddress, in the order of mc->rw->ro->md. The size of these 4 regions
 101 // are page-aligned, and there's no gap between any consecutive regions.
 102 //
 103 // These 4 regions are populated in the following steps:
 104 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
 105 //     temporarily allocated outside of the shared regions. Only the method entry
 106 //     trampolines are written into the mc region.
 107 // [2] ArchiveCompactor copies RW metadata into the rw region.
 108 // [3] ArchiveCompactor copies RO metadata into the ro region.
 109 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
 110 //     are copied into the ro region as read-only tables.
 111 // [5] C++ vtables are copied into the md region.

 112 //
 113 // The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
 114 // Their layout is independent of the other 4 regions.
 115 
 116 char* DumpRegion::expand_top_to(char* newtop) {
 117   assert(is_allocatable(), "must be initialized and not packed");
 118   assert(newtop >= _top, "must not grow backwards");
 119   if (newtop > _end) {
 120     MetaspaceShared::report_out_of_space(_name, newtop - _top);
 121     ShouldNotReachHere();
 122   }
 123   uintx delta;
 124   if (DynamicDumpSharedSpaces) {
 125     delta = DynamicArchive::object_delta_uintx(newtop);
 126   } else {
 127     delta = MetaspaceShared::object_delta_uintx(newtop);
 128   }
 129   if (delta > MAX_SHARED_DELTA) {
 130     // This is just a sanity check and should not appear in any real world usage. This
 131     // happens only if you allocate more than 2GB of shared objects and would require
 132     // millions of shared classes.
 133     vm_exit_during_initialization("Out of memory in the CDS archive",
 134                                   "Please reduce the number of shared classes.");


 155 void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
 156   tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
 157              _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
 158   if (strcmp(_name, failing_region) == 0) {
 159     tty->print_cr(" required = %d", int(needed_bytes));
 160   } else {
 161     tty->cr();
 162   }
 163 }
 164 
// Close off this region: round its end up to the metaspace reserve alignment
// and mark it packed. If a successor region is supplied, it begins exactly at
// our aligned end (no gap between consecutive regions) and may grow up to the
// end of the whole shared ReservedSpace.
void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, Metaspace::reserve_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_base = next->_top = this->_end;
    next->_end = MetaspaceShared::shared_rs()->end();
  }
}
 174 
// File-scope dump regions. Note: the declaration order here is not the layout
// order — the packing code lays them out mc->rw->ro->md (see comment above).
DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
 177 
 178 void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
 179   // Start with 0 committed bytes. The memory will be committed as needed by
 180   // MetaspaceShared::commit_shared_space_to().
 181   if (!_shared_vs.initialize(_shared_rs, 0)) {
 182     vm_exit_during_initialization("Unable to allocate memory for shared space");
 183   }
 184   first_space->init(&_shared_rs, (char*)first_space_bottom);
 185 }
 186 
// Accessor for the "mc" (misc code: method entry trampolines) dump region.
DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}
 190 
// Accessor for the "rw" (read-write metadata) dump region.
DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}
 194 
// Accessor for the "ro" (read-only metadata and tables) dump region.
DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}
 198 




// Pack 'current' and position 'next' immediately after it.
// 'rs' is unused here; it is kept in the signature for compatibility with callers.
void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}
 203 
// Allocate num_bytes from the "mc" dump region; the region aborts the VM on overflow
// (see DumpRegion::expand_top_to).
char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}
 207 
// Allocate num_bytes from the "ro" dump region.
char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}
 211 
 212 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
 213   assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
 214 
 215   // If using shared space, open the file that contains the shared space
 216   // and map in the memory before initializing the rest of metaspace (so
 217   // the addresses don't conflict)
 218   FileMapInfo* mapinfo = new FileMapInfo(true);


 267   if (requested_address != NULL) {
 268     _shared_rs = ReservedSpace(size, alignment, large, requested_address);
 269   } else {
 270     _shared_rs = ReservedSpace(size, alignment, large);
 271   }
 272   return &_shared_rs;
 273 }
 274 
 275 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
 276   assert(DumpSharedSpaces, "should be called for dump time only");
 277   const size_t reserve_alignment = Metaspace::reserve_alignment();
 278   bool large_pages = false; // No large pages when dumping the CDS archive.
 279   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 280 
 281 #ifdef _LP64
 282   // On 64-bit VM, the heap and class space layout will be the same as if
 283   // you're running in -Xshare:on mode:
 284   //
 285   //                              +-- SharedBaseAddress (default = 0x800000000)
 286   //                              v
 287   // +-..---------+---------+ ... +----+----+----+----+---------------+
 288   // |    Heap    | Archive |     | MC | RW | RO | MD | class space   |
 289   // +-..---------+---------+ ... +----+----+----+----+---------------+
 290   // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB -->|
 291   //
 292   const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
 293   const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
 294 #else
 295   // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
 296   size_t cds_total = align_down(256*M, reserve_alignment);
 297 #endif
 298 
 299   // First try to reserve the space at the specified SharedBaseAddress.
 300   //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
 301   reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base);
 302   if (_shared_rs.is_reserved()) {
 303     assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
 304   } else {
 305     // Get a mmap region anywhere if the SharedBaseAddress fails.
 306     //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
 307     reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL);
 308   }
 309   if (!_shared_rs.is_reserved()) {
 310     vm_exit_during_initialization("Unable to reserve memory for shared space",


1051   // prevent divide-by-zero
1052   if (ro_all < 1) {
1053     ro_all = 1;
1054   }
1055   if (rw_all < 1) {
1056     rw_all = 1;
1057   }
1058 
1059   int all_ro_count = 0;
1060   int all_ro_bytes = 0;
1061   int all_rw_count = 0;
1062   int all_rw_bytes = 0;
1063 
1064 // To make fmt_stats be a syntactic constant (for format warnings), use #define.
1065 #define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
1066   const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
1067   const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";
1068 
1069   LogMessage(cds) msg;
1070 
1071   msg.info("Detailed metadata info (excluding st regions; rw stats include md/mc regions):");
1072   msg.info("%s", hdr);
1073   msg.info("%s", sep);
1074   for (int type = 0; type < int(_number_of_types); type ++) {
1075     const char *name = type_name((Type)type);
1076     int ro_count = _counts[RO][type];
1077     int ro_bytes = _bytes [RO][type];
1078     int rw_count = _counts[RW][type];
1079     int rw_bytes = _bytes [RW][type];
1080     int count = ro_count + rw_count;
1081     int bytes = ro_bytes + rw_bytes;
1082 
1083     double ro_perc = percent_of(ro_bytes, ro_all);
1084     double rw_perc = percent_of(rw_bytes, rw_all);
1085     double perc    = percent_of(bytes, ro_all + rw_all);
1086 
1087     msg.info(fmt_stats, name,
1088                          ro_count, ro_bytes, ro_perc,
1089                          rw_count, rw_bytes, rw_perc,
1090                          count, bytes, perc);
1091 


1910 
1911     ArchiveHeapOopmapInfo info;
1912     info._oopmap = (address)buffer;
1913     info._oopmap_size_in_bits = size_in_bits;
1914     oopmaps->append(info);
1915   }
1916 }
1917 #endif // INCLUDE_CDS_JAVA_HEAP
1918 
// Read the next pointer-sized word from the archive stream and store it in *p.
// The slot must not have been initialized already.
void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previous initialized pointer.");
  intptr_t obj = nextPtr();
  // Tag words are small negative values (see do_tag); a relocated pointer must
  // not look like one.
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}
1926 
1927 void ReadClosure::do_u4(u4* p) {
1928   intptr_t obj = nextPtr();
1929   *p = (u4)(uintx(obj));
1930 }
1931 
1932 void ReadClosure::do_bool(bool* p) {
1933   intptr_t obj = nextPtr();
1934   *p = (bool)(uintx(obj));
1935 }
1936 
// Read a tag word from the archive and verify it matches the expected tag.
// Checked twice: via assert (debug builds) and via FileMapInfo::assert_mark —
// presumably also effective in product builds for corrupt archives; verify.
void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}
1944 
1945 void ReadClosure::do_oop(oop *p) {
1946   narrowOop o = (narrowOop)nextPtr();
1947   if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
1948     p = NULL;
1949   } else {
1950     assert(HeapShared::is_heap_object_archiving_allowed(),
1951            "Archived heap object is not allowed");
1952     assert(HeapShared::open_archive_heap_region_mapped(),
1953            "Open archive heap region is not mapped");
1954     *p = HeapShared::decode_from_archive(o);




  73 #if INCLUDE_G1GC
  74 #include "gc/g1/g1CollectedHeap.hpp"
  75 #endif
  76 
// Definitions of MetaspaceShared's static state (declared in the header).
ReservedSpace MetaspaceShared::_shared_rs;   // reserved address range holding all dump regions
VirtualSpace MetaspaceShared::_shared_vs;    // committed-memory view over _shared_rs
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;    // NOTE(review): no explicit initializer — relies on static zero-init
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
size_t MetaspaceShared::_core_spaces_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;
  87 
  88 // The CDS archive is divided into the following regions:
  89 //     mc  - misc code (the method entry trampolines)
  90 //     rw  - read-write metadata
  91 //     ro  - read-only metadata and read-only tables
  92 //     md  - misc data (the c++ vtables)
  93 //     od  - optional data (original class files)
  94 //
  95 //     ca0 - closed archive heap space #0
  96 //     ca1 - closed archive heap space #1 (may be empty)
  97 //     oa0 - open archive heap space #0
  98 //     oa1 - open archive heap space #1 (may be empty)
  99 //
 100 // The mc, rw, ro, md and od regions are linearly allocated, starting from
 101 // SharedBaseAddress, in the order of mc->rw->ro->md->od. The size of these 5 regions
 102 // are page-aligned, and there's no gap between any consecutive regions.
 103 //
 104 // These 5 regions are populated in the following steps:
 105 // [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
 106 //     temporarily allocated outside of the shared regions. Only the method entry
 107 //     trampolines are written into the mc region.
 108 // [2] ArchiveCompactor copies RW metadata into the rw region.
 109 // [3] ArchiveCompactor copies RO metadata into the ro region.
 110 // [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
 111 //     are copied into the ro region as read-only tables.
 112 // [5] C++ vtables are copied into the md region.
 113 // [6] Original class files are copied into the od region.
 114 //
 115 // The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
 116 // Their layout is independent of the other 5 regions.
 117 
 118 char* DumpRegion::expand_top_to(char* newtop) {
 119   assert(is_allocatable(), "must be initialized and not packed");
 120   assert(newtop >= _top, "must not grow backwards");
 121   if (newtop > _end) {
 122     MetaspaceShared::report_out_of_space(_name, newtop - _top);
 123     ShouldNotReachHere();
 124   }
 125   uintx delta;
 126   if (DynamicDumpSharedSpaces) {
 127     delta = DynamicArchive::object_delta_uintx(newtop);
 128   } else {
 129     delta = MetaspaceShared::object_delta_uintx(newtop);
 130   }
 131   if (delta > MAX_SHARED_DELTA) {
 132     // This is just a sanity check and should not appear in any real world usage. This
 133     // happens only if you allocate more than 2GB of shared objects and would require
 134     // millions of shared classes.
 135     vm_exit_during_initialization("Out of memory in the CDS archive",
 136                                   "Please reduce the number of shared classes.");


 157 void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
 158   tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
 159              _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
 160   if (strcmp(_name, failing_region) == 0) {
 161     tty->print_cr(" required = %d", int(needed_bytes));
 162   } else {
 163     tty->cr();
 164   }
 165 }
 166 
// Close off this region: round its end up to the metaspace reserve alignment
// and mark it packed. If a successor region is supplied, it begins exactly at
// our aligned end (no gap between consecutive regions) and may grow up to the
// end of the whole shared ReservedSpace.
void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, Metaspace::reserve_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_base = next->_top = this->_end;
    next->_end = MetaspaceShared::shared_rs()->end();
  }
}
 176 
// File-scope dump regions. Note: the declaration order here is not the layout
// order — the packing code lays them out mc->rw->ro->md->od (see comment above).
DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
 179 
 180 void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
 181   // Start with 0 committed bytes. The memory will be committed as needed by
 182   // MetaspaceShared::commit_shared_space_to().
 183   if (!_shared_vs.initialize(_shared_rs, 0)) {
 184     vm_exit_during_initialization("Unable to allocate memory for shared space");
 185   }
 186   first_space->init(&_shared_rs, (char*)first_space_bottom);
 187 }
 188 
// Accessor for the "mc" (misc code: method entry trampolines) dump region.
DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}
 192 
// Accessor for the "rw" (read-write metadata) dump region.
DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}
 196 
// Accessor for the "ro" (read-only metadata and tables) dump region.
DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}
 200 
// Accessor for the "od" (optional data: original class files) dump region.
DumpRegion* MetaspaceShared::optional_data_dump_space() {
  return &_od_region;
}
 204 
// Pack 'current' and position 'next' immediately after it.
// 'rs' is unused here; it is kept in the signature for compatibility with callers.
void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}
 209 
// Allocate num_bytes from the "mc" dump region; the region aborts the VM on overflow
// (see DumpRegion::expand_top_to).
char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}
 213 
// Allocate num_bytes from the "ro" dump region.
char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}
 217 
 218 void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
 219   assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
 220 
 221   // If using shared space, open the file that contains the shared space
 222   // and map in the memory before initializing the rest of metaspace (so
 223   // the addresses don't conflict)
 224   FileMapInfo* mapinfo = new FileMapInfo(true);


 273   if (requested_address != NULL) {
 274     _shared_rs = ReservedSpace(size, alignment, large, requested_address);
 275   } else {
 276     _shared_rs = ReservedSpace(size, alignment, large);
 277   }
 278   return &_shared_rs;
 279 }
 280 
 281 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
 282   assert(DumpSharedSpaces, "should be called for dump time only");
 283   const size_t reserve_alignment = Metaspace::reserve_alignment();
 284   bool large_pages = false; // No large pages when dumping the CDS archive.
 285   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 286 
 287 #ifdef _LP64
 288   // On 64-bit VM, the heap and class space layout will be the same as if
 289   // you're running in -Xshare:on mode:
 290   //
 291   //                              +-- SharedBaseAddress (default = 0x800000000)
 292   //                              v
 293   // +-..---------+---------+ ... +----+----+----+----+----+---------------+
 294   // |    Heap    | Archive |     | MC | RW | RO | MD | OD | class space   |
 295   // +-..---------+---------+ ... +----+----+----+----+----+---------------+
 296   // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB ------->|
 297   //
 298   const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
 299   const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
 300 #else
 301   // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
 302   size_t cds_total = align_down(256*M, reserve_alignment);
 303 #endif
 304 
 305   // First try to reserve the space at the specified SharedBaseAddress.
 306   //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
 307   reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base);
 308   if (_shared_rs.is_reserved()) {
 309     assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
 310   } else {
 311     // Get a mmap region anywhere if the SharedBaseAddress fails.
 312     //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
 313     reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL);
 314   }
 315   if (!_shared_rs.is_reserved()) {
 316     vm_exit_during_initialization("Unable to reserve memory for shared space",


1057   // prevent divide-by-zero
1058   if (ro_all < 1) {
1059     ro_all = 1;
1060   }
1061   if (rw_all < 1) {
1062     rw_all = 1;
1063   }
1064 
1065   int all_ro_count = 0;
1066   int all_ro_bytes = 0;
1067   int all_rw_count = 0;
1068   int all_rw_bytes = 0;
1069 
1070 // To make fmt_stats be a syntactic constant (for format warnings), use #define.
1071 #define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
1072   const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
1073   const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";
1074 
1075   LogMessage(cds) msg;
1076 
1077   msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
1078   msg.info("%s", hdr);
1079   msg.info("%s", sep);
1080   for (int type = 0; type < int(_number_of_types); type ++) {
1081     const char *name = type_name((Type)type);
1082     int ro_count = _counts[RO][type];
1083     int ro_bytes = _bytes [RO][type];
1084     int rw_count = _counts[RW][type];
1085     int rw_bytes = _bytes [RW][type];
1086     int count = ro_count + rw_count;
1087     int bytes = ro_bytes + rw_bytes;
1088 
1089     double ro_perc = percent_of(ro_bytes, ro_all);
1090     double rw_perc = percent_of(rw_bytes, rw_all);
1091     double perc    = percent_of(bytes, ro_all + rw_all);
1092 
1093     msg.info(fmt_stats, name,
1094                          ro_count, ro_bytes, ro_perc,
1095                          rw_count, rw_bytes, rw_perc,
1096                          count, bytes, perc);
1097 


1916 
1917     ArchiveHeapOopmapInfo info;
1918     info._oopmap = (address)buffer;
1919     info._oopmap_size_in_bits = size_in_bits;
1920     oopmaps->append(info);
1921   }
1922 }
1923 #endif // INCLUDE_CDS_JAVA_HEAP
1924 
// Read the next pointer-sized word from the archive stream and store it in *p.
// The slot must not have been initialized already.
void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previous initialized pointer.");
  intptr_t obj = nextPtr();
  // Tag words are small negative values (see do_tag); a relocated pointer must
  // not look like one.
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}
1932 
1933 void ReadClosure::do_u4(u4* p) {
1934   intptr_t obj = nextPtr();
1935   *p = (u4)(uintx(obj));





1936 }
1937 
// Read a tag word from the archive and verify it matches the expected tag.
// Checked twice: via assert (debug builds) and via FileMapInfo::assert_mark —
// presumably also effective in product builds for corrupt archives; verify.
void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}
1945 
1946 void ReadClosure::do_oop(oop *p) {
1947   narrowOop o = (narrowOop)nextPtr();
1948   if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
1949     p = NULL;
1950   } else {
1951     assert(HeapShared::is_heap_object_archiving_allowed(),
1952            "Archived heap object is not allowed");
1953     assert(HeapShared::open_archive_heap_region_mapped(),
1954            "Open archive heap region is not mapped");
1955     *p = HeapShared::decode_from_archive(o);


< prev index next >