< prev index next >

Webrev side-by-side listing for src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
(old version of the file, followed by the new version)

Print this page

  1 /*
  2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.

  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 29 
 30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 31 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
 32 #include "runtime/atomic.hpp"
 33 
 34 HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest::Type type) {







































































 35   shenandoah_assert_heaplocked_or_safepoint();
 36   assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size);
 37 
 38   HeapWord* obj = top();
 39   if (pointer_delta(end(), obj) >= size) {
 40     make_regular_allocation();
 41     adjust_alloc_metadata(type, size);
 42 
 43     HeapWord* new_top = obj + size;
 44     set_top(new_top);
 45 
 46     assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
 47     assert(is_object_aligned(obj),     "obj is not aligned: "       PTR_FORMAT, p2i(obj));
 48 
 49     return obj;
 50   } else {
 51     return nullptr;
 52   }
 53 }
 54 
// Accumulate per-region allocation statistics for the given allocation type.
// 'size' is in words. Shared allocations are not tracked separately here.
inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) {
  switch (type) {
    case ShenandoahAllocRequest::_alloc_shared:
    case ShenandoahAllocRequest::_alloc_shared_gc:
      // Counted implicitly by tlab/gclab allocs
      break;
    case ShenandoahAllocRequest::_alloc_tlab:
      _tlab_allocs += size;
      break;
    case ShenandoahAllocRequest::_alloc_gclab:
      _gclab_allocs += size;
      break;
    default:
      ShouldNotReachHere();
  }
}
 71 
// Record 's' words of live data for allocation-time accounting; unlike the
// GC-marking variant below, this never reports to the pacer.
inline void ShenandoahHeapRegion::increase_live_data_alloc_words(size_t s) {
  internal_increase_live_data(s);
}
 75 
// Record 's' words of live data discovered during GC marking. When pacing is
// enabled, the same amount is reported to the pacer as marking progress.
inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
  internal_increase_live_data(s);
  if (ShenandoahPacing) {
    ShenandoahHeap::heap()->pacer()->report_mark(s);
  }
}
 82 
// Atomically bump this region's live-data counter by 's' words. Relaxed
// ordering: _live_data is a standalone tally; no other state is published
// together with it.
inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
  size_t new_live_data = Atomic::add(&_live_data, s, memory_order_relaxed);
#ifdef ASSERT
  // Sanity: live data can never exceed the bytes actually allocated here.
  size_t live_bytes = new_live_data * HeapWordSize;
  size_t used_bytes = used();
  assert(live_bytes <= used_bytes,
         "can't have more live data than used: " SIZE_FORMAT ", " SIZE_FORMAT, live_bytes, used_bytes);
#endif
}
 92 
// Reset the region's live-data counter to zero.
inline void ShenandoahHeapRegion::clear_live_data() {
  Atomic::store(&_live_data, (size_t)0);
}
 96 
// Current live-data tally, in heap words.
inline size_t ShenandoahHeapRegion::get_live_data_words() const {
  return Atomic::load(&_live_data);
}
100 
// Current live-data tally, converted from words to bytes.
inline size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}
104 
// True when any live data has been recorded for this region.
inline bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}
108 
109 inline size_t ShenandoahHeapRegion::garbage() const {
110   assert(used() >= get_live_data_bytes(),
111          "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
112          get_live_data_bytes(), used());
113 
114   size_t result = used() - get_live_data_bytes();
115   return result;
116 }
117 











// Load the update-refs watermark with acquire semantics, pairing with the
// release store in set_update_watermark().
inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const {
  HeapWord* watermark = Atomic::load_acquire(&_update_watermark);
  assert(bottom() <= watermark && watermark <= top(), "within bounds");
  return watermark;
}
123 
// Publish a new update-refs watermark; release store pairs with the acquire
// load in get_update_watermark().
inline void ShenandoahHeapRegion::set_update_watermark(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  Atomic::release_store(&_update_watermark, w);
}
128 
// Fast version that avoids synchronization, only to be used at safepoints.
// A plain store is safe there because no concurrent readers exist.
inline void ShenandoahHeapRegion::set_update_watermark_at_safepoint(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at Shenandoah safepoint");
  _update_watermark = w;
}
135 






























136 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP

  1 /*
  2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
 27 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
 28 
 29 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 30 
 31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 32 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
 33 #include "runtime/atomic.hpp"
 34 
// If next available memory is not aligned on address that is multiple of alignment, fill the empty space
// so that returned object is aligned on an address that is a multiple of alignment_in_words.  Requested
// size is in words.  It is assumed that this->is_old().  A pad object is allocated, filled, and registered
// if necessary to assure the new allocation is properly aligned.
//
// Returns nullptr when, after aligning and clamping, fewer than req.min_size()
// words can be supplied.
HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocRequest &req, size_t alignment_in_bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(req.is_lab_alloc(), "allocate_aligned() only applies to LAB allocations");
  assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size);
  assert(is_old(), "aligned allocations are only taken from OLD regions to support PLABs");

  HeapWord* orig_top = top();
  size_t addr_as_int = (uintptr_t) orig_top;

  // unalignment_bytes is the amount by which current top() exceeds the desired alignment point.  We subtract this amount
  // from alignment_in_bytes to determine padding required to next alignment point.

  // top is HeapWord-aligned so unalignment_bytes is a multiple of HeapWordSize
  size_t unalignment_bytes = addr_as_int % alignment_in_bytes;
  size_t unalignment_words = unalignment_bytes / HeapWordSize;

  size_t pad_words;
  HeapWord* aligned_obj;
  if (unalignment_words > 0) {
    pad_words = (alignment_in_bytes / HeapWordSize) - unalignment_words;
    if (pad_words < ShenandoahHeap::min_fill_size()) {
      // Gap too small to hold a fill object: skip ahead one full alignment granule.
      pad_words += (alignment_in_bytes / HeapWordSize);
    }
    aligned_obj = orig_top + pad_words;
  } else {
    pad_words = 0;
    aligned_obj = orig_top;
  }

  if (pointer_delta(end(), aligned_obj) < size) {
    // Requested size does not fit: shrink to whatever remains in the region.
    size = pointer_delta(end(), aligned_obj);
    // Force size to align on multiple of alignment_in_bytes
    size_t byte_size = size * HeapWordSize;
    size_t excess_bytes = byte_size % alignment_in_bytes;
    // Note: excess_bytes is a multiple of HeapWordSize because it is the difference of HeapWord-aligned end
    //       and proposed HeapWord-aligned object address.
    if (excess_bytes > 0) {
      size -= excess_bytes / HeapWordSize;
    }
  }

  // Both originally requested size and adjusted size must be properly aligned
  assert ((size * HeapWordSize) % alignment_in_bytes == 0, "Size must be multiple of alignment constraint");
  if (size >= req.min_size()) {
    // Even if req.min_size() is not a multiple of card size, we know that size is.
    if (pad_words > 0) {
      assert(pad_words >= ShenandoahHeap::min_fill_size(), "pad_words expanded above to meet size constraint");
      // Fill the alignment gap and register the filler with the remembered-set
      // scanner so card scans can walk over it.
      ShenandoahHeap::fill_with_object(orig_top, pad_words);
      ShenandoahHeap::heap()->card_scan()->register_object(orig_top);
    }

    make_regular_allocation(req.affiliation());
    adjust_alloc_metadata(req.type(), size);

    HeapWord* new_top = aligned_obj + size;
    assert(new_top <= end(), "PLAB cannot span end of heap region");
    set_top(new_top);
    // Report the (possibly clamped) size back, and the padding as waste.
    req.set_actual_size(size);
    req.set_waste(pad_words);
    assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
    assert(is_aligned(aligned_obj, alignment_in_bytes), "obj is not aligned: " PTR_FORMAT, p2i(aligned_obj));
    return aligned_obj;
  } else {
    return nullptr;
  }
}
105 
// Carve 'size' words off this region's top and return the start, or nullptr
// when it does not fit. Caller must hold the heap lock or be at a safepoint.
// NOTE(review): 'req' is passed by value but only read here — consider
// 'const ShenandoahAllocRequest&'; the matching declaration in the .hpp
// would need the same change.
HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest req) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size);

  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    // Transition region state and account the allocation before bumping top.
    make_regular_allocation(req.affiliation());
    adjust_alloc_metadata(req.type(), size);

    HeapWord* new_top = obj + size;
    set_top(new_top);

    assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
    assert(is_object_aligned(obj),     "obj is not aligned: "       PTR_FORMAT, p2i(obj));

    return obj;
  } else {
    return nullptr;
  }
}
126 
// Accumulate per-region allocation statistics for the given allocation type.
// 'size' is in words. Shared allocations are not tracked separately here.
inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) {
  switch (type) {
    case ShenandoahAllocRequest::_alloc_shared:
    case ShenandoahAllocRequest::_alloc_shared_gc:
      // Counted implicitly by tlab/gclab allocs
      break;
    case ShenandoahAllocRequest::_alloc_tlab:
      _tlab_allocs += size;
      break;
    case ShenandoahAllocRequest::_alloc_gclab:
      _gclab_allocs += size;
      break;
    case ShenandoahAllocRequest::_alloc_plab:
      // Promotion LABs (old-gen) get their own counter.
      _plab_allocs += size;
      break;
    default:
      ShouldNotReachHere();
  }
}
146 
// Record 's' words of live data for allocation-time accounting; unlike the
// GC-marking variant below, this never reports to the pacer.
inline void ShenandoahHeapRegion::increase_live_data_alloc_words(size_t s) {
  internal_increase_live_data(s);
}
150 
// Record 's' words of live data discovered during GC marking. When pacing is
// enabled, the same amount is reported to the pacer as marking progress.
inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
  internal_increase_live_data(s);
  if (ShenandoahPacing) {
    ShenandoahHeap::heap()->pacer()->report_mark(s);
  }
}
157 
158 inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
159   size_t new_live_data = Atomic::add(&_live_data, s, memory_order_relaxed);






160 }
161 
162 inline void ShenandoahHeapRegion::clear_live_data() {
163   Atomic::store(&_live_data, (size_t)0);
164 }
165 
// Current live-data tally, in heap words.
inline size_t ShenandoahHeapRegion::get_live_data_words() const {
  return Atomic::load(&_live_data);
}
169 
170 inline size_t ShenandoahHeapRegion::get_live_data_bytes() const {
171   return get_live_data_words() * HeapWordSize;
172 }
173 
174 inline bool ShenandoahHeapRegion::has_live() const {
175   return get_live_data_words() != 0;
176 }
177 
// Garbage is the portion of used() not accounted for by live data.
inline size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes(),
         "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
         get_live_data_bytes(), used());

  size_t result = used() - get_live_data_bytes();
  return result;
}
186 
187 inline size_t ShenandoahHeapRegion::garbage_before_padded_for_promote() const {
188   assert(get_top_before_promote() != nullptr, "top before promote should not equal null");
189   size_t used_before_promote = byte_size(bottom(), get_top_before_promote());
190   assert(used_before_promote >= get_live_data_bytes(),
191          "Live Data must be a subset of used before promotion live: " SIZE_FORMAT " used: " SIZE_FORMAT,
192          get_live_data_bytes(), used_before_promote);
193   size_t result = used_before_promote - get_live_data_bytes();
194   return result;
195 
196 }
197 
// Load the update-refs watermark with acquire semantics, pairing with the
// release store in set_update_watermark().
inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const {
  HeapWord* watermark = Atomic::load_acquire(&_update_watermark);
  assert(bottom() <= watermark && watermark <= top(), "within bounds");
  return watermark;
}
203 
// Publish a new update-refs watermark; release store pairs with the acquire
// load in get_update_watermark().
inline void ShenandoahHeapRegion::set_update_watermark(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  Atomic::release_store(&_update_watermark, w);
}
208 
// Fast version that avoids synchronization, only to be used at safepoints.
// A plain store is safe there because no concurrent readers exist.
inline void ShenandoahHeapRegion::set_update_watermark_at_safepoint(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at Shenandoah safepoint");
  _update_watermark = w;
}
215 
// Generation affiliation of this region; the value is kept by the heap,
// keyed by region, rather than stored in the region itself.
inline ShenandoahAffiliation ShenandoahHeapRegion::affiliation() const {
  return ShenandoahHeap::heap()->region_affiliation(this);
}
219 
// Human-readable name for this region's affiliation, for logging/printing.
inline const char* ShenandoahHeapRegion::affiliation_name() const {
  return shenandoah_affiliation_name(affiliation());
}
223 
// True when this region belongs to the young generation.
inline bool ShenandoahHeapRegion::is_young() const {
  return affiliation() == YOUNG_GENERATION;
}
227 
// True when this region belongs to the old generation.
inline bool ShenandoahHeapRegion::is_old() const {
  return affiliation() == OLD_GENERATION;
}
231 
// True when this region has been claimed by some generation (i.e. not FREE).
inline bool ShenandoahHeapRegion::is_affiliated() const {
  return affiliation() != FREE;
}
235 
// Snapshot the current top so restore_top_before_promote() can rewind to it.
inline void ShenandoahHeapRegion::save_top_before_promote() {
  _top_before_promoted = _top;
}
239 
240 inline void ShenandoahHeapRegion::restore_top_before_promote() {
241   _top = _top_before_promoted;
242   _top_before_promoted = nullptr;
243  }
244 
245 
246 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
< prev index next >