/*
 * Copyright (c) 2006, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/mutableNUMASpace.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"

MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) {
  _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, mtGC);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;

#ifdef LINUX
  // Changing the page size can lead to freeing of memory. When using large pages
  // and the memory has been both reserved and committed, Linux does not support
  // freeing parts of it.
  if (UseLargePages && !os::can_commit_large_page_memory()) {
    _must_use_large_pages = true;
  }
#endif // LINUX

  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_unused_area_complete() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_region(MemRegion mr) {
  // This method should do nothing because numa spaces are not mangled.
}
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
  assert(false, "Do not mangle MutableNUMASpace's");
}
void MutableNUMASpace::set_top_for_allocations() {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area_complete() {
  // This method should do nothing.
}
#endif  // NOT_PRODUCT

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        HeapWord* cur_top = s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
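          // Fill the gap with one or more dead objects; each filler object
          // is at most filler_array_max_size() words long.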
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
                 "Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
                 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size());
          CollectedHeap::fill_with_object(cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = align_up(cur_top, os::vm_page_size());
            HeapWord *crossing_end = align_down(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If the object header crossed a small page boundary, we mark the
              // area as invalid, rounding it to page_size().
              HeapWord *start = MAX2(align_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2(align_up(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top += words_to_fill;
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}

size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}


size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location; the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::tlab_used(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return used_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->used_in_bytes();
}


size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}


size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology has changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}

// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  HeapWord *start = align_up(mr.start(), page_size());
  HeapWord *end = align_down(mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = align_up(mr.start(), page_size());
  HeapWord *end = align_down(mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
  }
}

// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized
    // and clear the alloc-rate statistics.
    // In the future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      MutableSpace *s = ls->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
      ls->clear_alloc_rate();
    }
    // A NUMA space is never mangled
    initialize(region(),
               SpaceDecorator::Clear,
               SpaceDecorator::DontMangle);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      // A NUMA space is never mangled
      initialize(region(),
                 SpaceDecorator::Clear,
                 SpaceDecorator::DontMangle);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}

// Scan pages. Free pages that have smaller size or wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count)
{
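  // Split the page scanning budget evenly among the lgrp spaces.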
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}

// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on a sequence of i's from 0 to
// lgrp_spaces()->length().
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= align_down(current_chunk_size(j), page_size()) / page_size();
  }
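  // Reserve one page for each of the lgrp spaces that follow this one.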
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
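    // Give this space a share of the remaining pages proportional to its
    // average allocation rate relative to the remaining spaces.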
    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = align_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      size_t upper_bound = pages_available * page_size();
      if (upper_bound > limit &&
          current_chunk_size(i) < upper_bound - limit) {
        // The resulting upper bound should not exceed the available
        // amount of memory (pages_available * page_size()).
        upper_bound = current_chunk_size(i) + limit;
      }
      chunk_size = MIN2(chunk_size, upper_bound);
    } else {
      size_t lower_bound = page_size();
      if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
        lower_bound = current_chunk_size(i) - limit;
      }
      chunk_size = MAX2(chunk_size, lower_bound);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}


// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there bottom?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = align_up(intersection.start(), alignment());
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there top?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = align_down(intersection.end(), alignment());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}

// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. Return a non-empty invalid_region, aligned to the
// page_size() boundary, only if it lies inside the intersection.
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else
    if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
      *intersection = MemRegion(intersection->start(), invalid_region->start());
      *invalid_region = MemRegion();
    } else
      if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
        *intersection = MemRegion(new_region.start(), new_region.start());
        *invalid_region = MemRegion();
      } else
        if (intersection->contains(invalid_region)) {
            // That's the only case we have to make an additional bias_region() call.
            HeapWord* start = invalid_region->start();
            HeapWord* end = invalid_region->end();
            if (UseLargePages && page_size() >= alignment()) {
              HeapWord *p = align_down(start, alignment());
              if (new_region.contains(p)) {
                start = p;
              }
              p = align_up(end, alignment());
              if (new_region.contains(end)) {
                end = p;
              }
            }
            if (intersection->start() > start) {
              *intersection = MemRegion(start, intersection->end());
            }
            if (intersection->end() < end) {
              *intersection = MemRegion(intersection->start(), end);
            }
            *invalid_region = MemRegion(start, end);
        }
}

void MutableNUMASpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space,
                                  bool setup_pages,
                                  WorkGang* pretouch_gang) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  // Must always clear the space
  clear(SpaceDecorator::DontMangle);

  // Compute chunk sizes
  size_t prev_page_size = page_size();
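  // Use alignment() as the page size when large pages are enabled;
  // otherwise use the default small page size.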
  set_page_size(UseLargePages ? alignment() : os::vm_page_size());
  HeapWord* rounded_bottom = align_up(bottom(), page_size());
  HeapWord* rounded_end = align_down(end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();

  // Try small pages if the chunk size is too small
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    // Changing the page size below can lead to freeing of memory. So we fail initialization.
    if (_must_use_large_pages) {
      vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");
    }
    set_page_size(os::vm_page_size());
    rounded_bottom = align_up(bottom(), page_size());
    rounded_end = align_down(end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL   ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
  }

  // Check if the space layout has changed significantly.
  // This happens when the space has been resized so that either head or tail
  // chunk became less than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing          &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();


  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing                                ||
          (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
           samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else
        if (!layout_valid || NUMASpaceResizeRate == 0) {
          // Fast adaptation. If no space resize rate is set, resize
          // the chunks instantly.
          chunk_byte_size = adaptive_chunk_size(i, 0);
        } else {
          // Slow adaptation. Resize the chunks moving no more than
          // NUMASpaceResizeRate bytes per collection.
          size_t limit = NUMASpaceResizeRate /
                         (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
          chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
        }

      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
    }

    if (i == 0) { // Bottom chunk
      if (i != lgrp_spaces()->length() - 1) {
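        // chunk_byte_size is in bytes; shift by LogHeapWordSize to get the
        // chunk size in words.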
        new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
      } else {
        new_region = MemRegion(bottom(), end());
      }
    } else
      if (i < lgrp_spaces()->length() - 1) { // Middle chunks
        MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
        new_region = MemRegion(ps->end(),
                               ps->end() + (chunk_byte_size >> LogHeapWordSize));
      } else { // Top chunk
        MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
        new_region = MemRegion(ps->end(), end());
      }
    guarantee(region().contains(new_region), "Region invariant");


    // The general case:
    // |---------------------|--invalid---|--------------------------|
    // |------------------new_region---------------------------------|
    // |----bottom_region--|---intersection---|------top_region------|
    //                     |----old_region----|
    // The intersection part has all pages in place; we don't need to migrate them.
    // Pages for the top and bottom part should be freed and then reallocated.

    MemRegion intersection = old_region.intersection(new_region);

    if (intersection.start() == NULL || intersection.end() == NULL) {
      intersection = MemRegion(new_region.start(), new_region.start());
    }

    if (!os::numa_has_static_binding()) {
      MemRegion invalid_region = ls->invalid_region().intersection(new_region);
      // Invalid region is a range of memory that could've possibly
      // been allocated on the other node. That's relevant only on Solaris where
      // there is no static memory binding.
      if (!invalid_region.is_empty()) {
        merge_regions(new_region, &intersection, &invalid_region);
        free_region(invalid_region);
        ls->set_invalid_region(MemRegion());
      }
    }

    select_tails(new_region, intersection, &bottom_region, &top_region);

    if (!os::numa_has_static_binding()) {
      // If that's a system with the first-touch policy then it's enough
      // to free the pages.
      free_region(bottom_region);
      free_region(top_region);
    } else {
      // In a system with static binding we have to change the bias whenever
      // we reshape the heap.
      bias_region(bottom_region, ls->lgrp_id());
      bias_region(top_region, ls->lgrp_id());
    }

    // Clear space (set top = bottom) but never mangle.
    s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);

    set_adaptation_cycles(samples_count());
  }
}

// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length();) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2(align_down(s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      // Check if setting the chunk's top to a given value would create a hole less than
      // a minimal object; assuming that's not the last chunk, in which case we don't care.
      if (i < lgrp_spaces()->length() - 1) {
        size_t remainder = pointer_delta(s->end(), value);
        const size_t min_fill_size = CollectedHeap::min_fill_size();
        if (remainder < min_fill_size && remainder > 0) {
          // Add a minimum size filler object; it will cross the chunk boundary.
          CollectedHeap::fill_with_object(value, min_fill_size);
          value += min_fill_size;
          assert(!s->contains(value), "Should be in the next chunk");
          // Restart the loop from the same chunk, since the value has moved
          // to the next one.
          continue;
        }
      }

      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (!os::numa_has_static_binding() && top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
    i++;
  }
  MutableSpace::set_top(value);
}

void MutableNUMASpace::clear(bool mangle_space) {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    // Never mangle NUMA spaces because the mangling will
    // bind the memory to a possibly unwanted lgroup.
    lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
  }
}

/*
   Linux supports static memory binding, so most of the logic dealing with
   possibly invalid page allocation is effectively disabled. There is also
   no notion of a home node in Linux: a thread may migrate freely, although
   the scheduler is rather reluctant to move threads between nodes. We
   therefore check for the current node on every allocation, and with high
   probability a thread stays on the same node for some time, allowing local
   access to recently allocated objects.
 */

HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  LGRPSpace *ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL) {
    size_t remainder = pointer_delta(s->end(), p + size);
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      if (s->cas_deallocate(p, size)) {
        // We were the last to allocate and created a fragment less than
        // a minimal object.
        p = NULL;
      } else {
        guarantee(false, "Deallocation should always succeed");
      }
    }
  }
  if (p != NULL) {
    HeapWord* cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg(top_addr(), cur_top, cur_chunk_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
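      // Touch the first word of every page so the OS places the page
      // on the allocating thread's node (first-touch).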
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print("    lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        lgrp_spaces()->at(i)->accumulate_statistics(page_size());
      }
      st->print("    local/remote/unbiased/uncommitted: " SIZE_FORMAT "K/"
                SIZE_FORMAT "K/" SIZE_FORMAT "K/" SIZE_FORMAT
                "K, large/small pages: " SIZE_FORMAT "/" SIZE_FORMAT "\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}

void MutableNUMASpace::verify() {
  // This can be called after setting an arbitrary value to the space's top,
  // so an object can cross the chunk boundary. We ensure the parsability
  // of the space and just walk the objects in linear fashion.
  ensure_parsability();
  MutableSpace::verify();
}

// Scan pages and gather stats about page placement and size.
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)align_up(space()->bottom(), page_size);
  char* end = (char*)align_down(space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));
}

// Scan page_count pages and verify that they have the right size and placement.
// If invalid pages are found they are freed in the hope that a subsequent
// reallocation will be more successful.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)align_up(space()->bottom(), page_size);
  char* range_end = (char*)align_down(space()->end(), page_size);

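  // Resume from where the previous scan stopped; restart from the beginning
  // of the space if that point is no longer within the current range.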
  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      assert(e < scan_end, "e: " PTR_FORMAT " scan_end: " PTR_FORMAT, p2i(e), p2i(scan_end));

      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}