1 /*
  2  * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
 26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
 27 
 28 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 29 
 30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 31 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
 32 #include "runtime/atomic.hpp"
 33 
 34 // If next available memory is not aligned on address that is multiple of alignment, fill the empty space
 35 // so that returned object is aligned on an address that is a multiple of alignment_in_words.  Requested
 36 // size is in words.  It is assumed that this->is_old().  A pad object is allocated, filled, and registered
 37 // if necessary to assure the new allocation is properly aligned.
 38 HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocRequest req, size_t alignment_in_bytes) {
 39   shenandoah_assert_heaplocked_or_safepoint();
 40   assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size);
 41   assert(is_old(), "aligned allocations are only taken from OLD regions to support PLABs");
 42 
 43   HeapWord* obj = top();
 44   uintptr_t addr_as_int = (uintptr_t) obj;
 45 
 46   size_t unalignment_bytes = addr_as_int % alignment_in_bytes;
 47   size_t unalignment_words = unalignment_bytes / HeapWordSize;
 48   if (pointer_delta(end(), obj + unalignment_words) >= size) {
 49     if (unalignment_words > 0) {
 50       size_t pad_words = (alignment_in_bytes / HeapWordSize) - unalignment_words;
 51       if (pad_words < ShenandoahHeap::min_fill_size()) {
 52         pad_words += (alignment_in_bytes / HeapWordSize);
 53       }
 54       ShenandoahHeap::fill_with_object(obj, pad_words);
 55       ShenandoahHeap::heap()->card_scan()->register_object(obj);
 56       obj += pad_words;
 57     }
 58 
 59     make_regular_allocation(req.affiliation());
 60     adjust_alloc_metadata(req.type(), size);
 61 
 62     HeapWord* new_top = obj + size;
 63     set_top(new_top);
 64     assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
 65     assert(is_aligned(obj, alignment_in_bytes), "obj is not aligned: " PTR_FORMAT, p2i(obj));
 66 
 67     return obj;
 68   } else {
 69     return NULL;
 70   }
 71 }
 72 
 73 HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest req) {
 74   shenandoah_assert_heaplocked_or_safepoint();
 75   assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size);
 76 
 77   HeapWord* obj = top();
 78   if (pointer_delta(end(), obj) >= size) {
 79     make_regular_allocation(req.affiliation());
 80     adjust_alloc_metadata(req.type(), size);
 81 
 82     HeapWord* new_top = obj + size;
 83     set_top(new_top);
 84 
 85     assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
 86     assert(is_object_aligned(obj),     "obj is not aligned: "       PTR_FORMAT, p2i(obj));
 87 
 88     return obj;
 89   } else {
 90     return NULL;
 91   }
 92 }
 93 
 94 inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) {
 95   switch (type) {
 96     case ShenandoahAllocRequest::_alloc_shared:
 97     case ShenandoahAllocRequest::_alloc_shared_gc:
 98       // Counted implicitly by tlab/gclab allocs
 99       break;
100     case ShenandoahAllocRequest::_alloc_tlab:
101       _tlab_allocs += size;
102       break;
103     case ShenandoahAllocRequest::_alloc_gclab:
104       _gclab_allocs += size;
105       break;
106     case ShenandoahAllocRequest::_alloc_plab:
107       _plab_allocs += size;
108       break;
109     default:
110       ShouldNotReachHere();
111   }
112 }
113 
// Add `s` words of live data attributed to mutator allocation.  Unlike the
// GC-mark variant below, this path never reports to the pacer.
inline void ShenandoahHeapRegion::increase_live_data_alloc_words(size_t s) {
  internal_increase_live_data(s);
}
117 
// Add `s` words of live data found during GC marking.  When pacing is enabled,
// also report the marked words to the pacer so it can budget mutator progress
// against concurrent GC progress.
inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) {
  internal_increase_live_data(s);
  if (ShenandoahPacing) {
    ShenandoahHeap::heap()->pacer()->report_mark(s);
  }
}
124 
// Shared accounting for both the alloc- and GC-mark live-data updates above.
// Relaxed atomic add: callers only need the counter itself to be consistent,
// not ordered against surrounding memory operations.
inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
  size_t new_live_data = Atomic::add(&_live_data, s, memory_order_relaxed);
#ifdef ASSERT
  // Sanity check: live data (converted to bytes) can never exceed used bytes.
  size_t live_bytes = new_live_data * HeapWordSize;
  size_t used_bytes = used();
  assert(live_bytes <= used_bytes,
         "%s Region " SIZE_FORMAT " can't have more live data than used: " SIZE_FORMAT ", " SIZE_FORMAT " after adding " SIZE_FORMAT,
         affiliation_name(affiliation()), index(), live_bytes, used_bytes, s * HeapWordSize);
#endif
}
135 
// Reset the live-data counter to zero (atomic store, so concurrent readers
// never observe a torn value).
inline void ShenandoahHeapRegion::clear_live_data() {
  Atomic::store(&_live_data, (size_t)0);
}
139 
// Current live-data count for this region, in heap words (atomic read).
inline size_t ShenandoahHeapRegion::get_live_data_words() const {
  return Atomic::load(&_live_data);
}
143 
// Current live-data count for this region, converted to bytes.
inline size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}
147 
148 inline bool ShenandoahHeapRegion::has_live() const {
149   return get_live_data_words() != 0;
150 }
151 
152 inline size_t ShenandoahHeapRegion::garbage() const {
153   assert(used() >= get_live_data_bytes(),
154          "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
155          get_live_data_bytes(), used());
156 
157   size_t result = used() - get_live_data_bytes();
158   return result;
159 }
160 
// Read the update watermark with acquire semantics; pairs with the release
// store in set_update_watermark() so readers see a fully published value.
inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const {
  HeapWord* watermark = Atomic::load_acquire(&_update_watermark);
  assert(bottom() <= watermark && watermark <= top(), "within bounds");
  return watermark;
}
166 
// Publish a new update watermark with release semantics; pairs with the
// acquire load in get_update_watermark().
inline void ShenandoahHeapRegion::set_update_watermark(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  Atomic::release_store(&_update_watermark, w);
}
171 
// Fast version that avoids synchronization, only to be used at safepoints:
// with all Java threads stopped, a plain store cannot race with readers.
inline void ShenandoahHeapRegion::set_update_watermark_at_safepoint(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at Shenandoah safepoint");
  _update_watermark = w;
}
178 
// Clear the flag recording that this region holds young LAB allocations.
inline void ShenandoahHeapRegion::clear_young_lab_flags() {
  _has_young_lab = false;
}
182 
// Mark this region as holding young LAB allocations.
inline void ShenandoahHeapRegion::set_young_lab_flag() {
  _has_young_lab = true;
}
186 
// True iff this region has been flagged as holding young LAB allocations.
inline bool ShenandoahHeapRegion::has_young_lab_flag() {
  return _has_young_lab;
}
190 
191 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP