
src/hotspot/share/memory/filemap.cpp

--- old/src/hotspot/share/memory/filemap.cpp

1063 // Map the whole region at once, assumed to be allocated contiguously.
1064 ReservedSpace FileMapInfo::reserve_shared_memory() {
1065   char* requested_addr = region_addr(0);
1066   size_t size = FileMapInfo::core_spaces_size();
1067 
1068   // Reserve the space first, then map; otherwise the mapping will go right
1069   // over some other reserved memory (like the code cache).
1070   ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
1071   if (!rs.is_reserved()) {
1072     fail_continue("Unable to reserve shared space at required address "
1073                   INTPTR_FORMAT, p2i(requested_addr));
1074     return rs;
1075   }
1076   // The reserved virtual memory is for mapping the class data sharing (CDS) archive.
1077   MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
1078 
1079   return rs;
1080 }
1081 
1082 // Memory map a region in the address space.
1083 static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode",
1084                                             "String1", "String2", "OpenArchive1", "OpenArchive2" };
1085 
1086 char* FileMapInfo::map_regions(int regions[], char* saved_base[], size_t len) {
1087   char* prev_top = NULL;
1088   char* curr_base;
1089   char* curr_top;
1090   int i = 0;
1091   for (i = 0; i < (int)len; i++) {
1092     curr_base = map_region(regions[i], &curr_top);
1093     if (curr_base == NULL) {
1094       return NULL;
1095     }
1096     if (i > 0) {
1097       // We require that mc->rw->ro->md be laid out consecutively, with no
1098       // gaps between them. That way, we can ensure that the OS won't be able to
1099       // allocate any new memory spaces inside _shared_metaspace_{base,top}, which
1100       // would mess up the simple comparison in MetaspaceShared::is_in_shared_metaspace().
1101       assert(curr_base == prev_top, "must be");
1102     }
1103     log_info(cds)("Mapped region #%d at base %p top %p", regions[i], curr_base, curr_top);
1104     saved_base[i] = curr_base;
1105     prev_top = curr_top;
1106   }
1107   return curr_top;
1108 }
1109 
1110 char* FileMapInfo::map_region(int i, char** top_ret) {
1111   assert(!HeapShared::is_heap_region(i), "sanity");
1112   CDSFileMapRegion* si = space_at(i);
1113   size_t used = si->_used;
1114   size_t alignment = os::vm_allocation_granularity();
1115   size_t size = align_up(used, alignment);
1116   char *requested_addr = region_addr(i);
1117 

+++ new/src/hotspot/share/memory/filemap.cpp

1063 // Map the whole region at once, assumed to be allocated contiguously.
1064 ReservedSpace FileMapInfo::reserve_shared_memory() {
1065   char* requested_addr = region_addr(0);
1066   size_t size = FileMapInfo::core_spaces_size();
1067 
1068   // Reserve the space first, then map; otherwise the mapping will go right
1069   // over some other reserved memory (like the code cache).
1070   ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
1071   if (!rs.is_reserved()) {
1072     fail_continue("Unable to reserve shared space at required address "
1073                   INTPTR_FORMAT, p2i(requested_addr));
1074     return rs;
1075   }
1076   // The reserved virtual memory is for mapping the class data sharing (CDS) archive.
1077   MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
1078 
1079   return rs;
1080 }
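
As an aside on the reserve-then-map pattern above: the same idea can be
sketched with plain POSIX calls. This is a hypothetical standalone example,
not JDK code; the PROT_NONE anonymous mapping stands in for ReservedSpace,
and the MAP_FIXED remap plays the role of the later map_region() calls.

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cstdio>

int main(int argc, char** argv) {
  if (argc < 2) { std::fprintf(stderr, "usage: %s <file>\n", argv[0]); return 1; }
  int fd = open(argv[1], O_RDONLY);
  if (fd < 0) { std::perror("open"); return 1; }
  struct stat st;
  if (fstat(fd, &st) != 0) { std::perror("fstat"); return 1; }

  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  size_t size = ((size_t)st.st_size + page - 1) & ~(page - 1);

  // Step 1: reserve the range. PROT_NONE keeps it inaccessible but claims
  // the address space, so nothing else can be allocated there.
  void* base = mmap(nullptr, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) { std::perror("mmap reserve"); return 1; }

  // Step 2: map the file over the reservation. MAP_FIXED is safe only
  // because we already own this range; mapping blindly at a fixed address
  // could silently replace an unrelated mapping (like the code cache).
  void* mapped = mmap(base, size, PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
  if (mapped == MAP_FAILED) { std::perror("mmap file"); return 1; }

  std::printf("file mapped at %p\n", mapped);
  munmap(base, size);
  close(fd);
  return 0;
}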
1081 
1082 // Memory map a region in the address space.
1083 static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode", "OptionalData",
1084                                             "String1", "String2", "OpenArchive1", "OpenArchive2" };
1085 
1086 char* FileMapInfo::map_regions(int regions[], char* saved_base[], size_t len) {
1087   char* prev_top = NULL;
1088   char* curr_base;
1089   char* curr_top;
1090   int i = 0;
1091   for (i = 0; i < (int)len; i++) {
1092     curr_base = map_region(regions[i], &curr_top);
1093     if (curr_base == NULL) {
1094       return NULL;
1095     }
1096     if (i > 0) {
1097       // We require that mc->rw->ro->md->od be laid out consecutively, with no
1098       // gaps between them. That way, we can ensure that the OS won't be able to
1099       // allocate any new memory spaces inside _shared_metaspace_{base,top}, which
1100       // would mess up the simple comparison in MetaspaceShared::is_in_shared_metaspace().
1101       assert(curr_base == prev_top, "must be");
1102     }
1103     log_info(cds)("Mapped region #%d at base %p top %p", regions[i], curr_base, curr_top);
1104     saved_base[i] = curr_base;
1105     prev_top = curr_top;
1106   }
1107   return curr_top;
1108 }
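
The assert above is what keeps the check named in the comment cheap: because
mc->rw->ro->md->od are mapped back to back, the whole shared metaspace is a
single range [base, top). A minimal sketch of that idea follows; the field
and function names echo the comment, but the rest is illustrative, not the
JDK implementation.

#include <cstdint>
#include <cstdio>

static char* _shared_metaspace_base = nullptr;  // base of the first region
static char* _shared_metaspace_top  = nullptr;  // top of the last region

// With the regions contiguous, membership is just two compares.
static bool is_in_shared_metaspace(const void* p) {
  uintptr_t a = (uintptr_t)p;
  return a >= (uintptr_t)_shared_metaspace_base &&
         a <  (uintptr_t)_shared_metaspace_top;
}

int main() {
  static char arena[4096];                // stand-in for the mapped regions
  _shared_metaspace_base = arena;
  _shared_metaspace_top  = arena + sizeof(arena);

  int unrelated = 0;
  std::printf("%d\n", is_in_shared_metaspace(arena + 100)); // 1: inside
  std::printf("%d\n", is_in_shared_metaspace(&unrelated));  // 0: outside
  return 0;
}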
1109 
1110 char* FileMapInfo::map_region(int i, char** top_ret) {
1111   assert(!HeapShared::is_heap_region(i), "sanity");
1112   CDSFileMapRegion* si = space_at(i);
1113   size_t used = si->_used;
1114   size_t alignment = os::vm_allocation_granularity();
1115   size_t size = align_up(used, alignment);
1116   char *requested_addr = region_addr(i);
1117 
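
A note on the size computation above: map_region() rounds the region's used
size up to the allocation granularity before mapping. Assuming the
granularity is a power of two, align_up() reduces to the usual bit trick.
The helper below is a stand-in sketch, not the JDK's actual template.

#include <cstddef>
#include <cstdio>

// Round value up to a multiple of alignment (a power of two).
static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  std::printf("%zu\n", align_up(5000, 4096));  // 8192
  std::printf("%zu\n", align_up(8192, 4096));  // 8192 (already aligned)
  return 0;
}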

