/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_PLAB_HPP
#define SHARE_GC_SHARED_PLAB_HPP

#include "gc/shared/gcUtil.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"

// Forward declarations.
class PLABStats;

// A per-thread allocation buffer used during GC.
class PLAB: public CHeapObj<mtGC> {
protected:
  char      head[32];
  size_t    _word_sz;       // In HeapWord units
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _end;           // Last allocatable address + 1
  HeapWord* _hard_end;      // _end + AlignmentReserve
  // In support of ergonomic sizing of PLAB's
  size_t    _allocated;     // in HeapWord units
  size_t    _wasted;        // in HeapWord units
  size_t    _undo_wasted;
  char      tail[32];
  static size_t AlignmentReserve;

  // Force future allocations to fail and queries for contains()
  // to return false. Returns the amount of unused space in this PLAB.
  size_t invalidate() {
    _end    = _hard_end;
    size_t remaining = pointer_delta(_end, _top); // Calculate remaining space.
    _top    = _end;      // Force future allocations to fail.
    _bottom = _end;      // Force future contains() queries to return false.
    return remaining;
  }

  // Fill in remaining space with a dummy object and invalidate the PLAB. Returns
  // the amount of remaining space.
  size_t retire_internal();

  void add_undo_waste(HeapWord* obj, size_t word_sz);

  // Undo the last allocation in the buffer, which is required to be the
  // object "obj" of the given "word_sz".
  void undo_last_allocation(HeapWord* obj, size_t word_sz);

public:
  // Initializes the buffer to be empty, but with the given "word_sz".
  // Must get initialized with "set_buf" for an allocation to succeed.
  PLAB(size_t word_sz);

  static size_t size_required_for_allocation(size_t word_size) { return word_size + AlignmentReserve; }

  // Minimum PLAB size.
  static size_t min_size();
  // Maximum PLAB size.
  static size_t max_size();

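  // Illustrative sketch only (not part of this interface): a GC worker usually
  // tries its PLAB first and falls back to a slower shared allocation path when
  // the buffer cannot satisfy the request. The helper refill_plab_and_allocate()
  // below is hypothetical.
  //
  //   HeapWord* allocate_for_copy(PLAB* plab, size_t word_sz) {
  //     HeapWord* obj = plab->allocate(word_sz);
  //     if (obj == NULL) {
  //       // PLAB exhausted: retire it and install a new buffer via set_buf(),
  //       // or allocate the object directly from the shared space.
  //       obj = refill_plab_and_allocate(plab, word_sz);  // hypothetical
  //     }
  //     return obj;
  //   }
  //
  // If the speculative copy turns out not to be needed (e.g. another thread wins
  // the forwarding race), the allocation can be returned to the buffer with
  // undo_allocation(obj, word_sz).
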
  // If an allocation of the given "word_sz" can be satisfied within the
  // buffer, do the allocation, returning a pointer to the start of the
  // allocated block. If the allocation request cannot be satisfied,
  // return NULL.
  HeapWord* allocate(size_t word_sz) {
    HeapWord* res = _top;
    if (pointer_delta(_end, _top) >= word_sz) {
      _top = _top + word_sz;
      return res;
    } else {
      return NULL;
    }
  }

  // Undo any allocation in the buffer, which is required to be the
  // object "obj" of the given "word_sz".
  void undo_allocation(HeapWord* obj, size_t word_sz);

  // The total (word) size of the buffer, including both allocated and
  // unallocated space.
  size_t word_sz() { return _word_sz; }

  size_t waste()      { return _wasted; }
  size_t undo_waste() { return _undo_wasted; }

  // The number of words of unallocated space remaining in the buffer.
  size_t words_remaining() {
    assert(_end >= _top, "Negative buffer");
    return pointer_delta(_end, _top, HeapWordSize);
  }

  bool contains(void* addr) {
    return (void*)_bottom <= addr && addr < (void*)_hard_end;
  }

  // Sets the space of the buffer to be [buf, buf + word_sz()).
  void set_buf(HeapWord* buf, size_t new_word_sz) {
    assert(new_word_sz > AlignmentReserve, "Too small");
    _word_sz = new_word_sz;

    _bottom   = buf;
    _top      = _bottom;
    _hard_end = _bottom + word_sz();
    _end      = _hard_end - AlignmentReserve;
    assert(_end >= _top, "Negative buffer");
    // In support of ergonomic sizing
    _allocated += word_sz();
  }

  // Flush allocation statistics into the given PLABStats supporting ergonomic
  // sizing of PLAB's and retire the current buffer. To be called at the end of
  // GC.
  void flush_and_retire_stats(PLABStats* stats);

  // Fills in the unallocated portion of the buffer with a garbage object and updates
  // statistics. To be called during GC.
  void retire();
};

// PLAB book-keeping.
class PLABStats : public CHeapObj<mtGC> {
protected:
  const char* _description;   // Identifying string.

  size_t _allocated;          // Total allocated
  size_t _wasted;             // of which wasted (internal fragmentation)
  size_t _undo_wasted;        // of which wasted on undo (is not used for calculation of PLAB size)
  size_t _unused;             // Unused in last buffer
  size_t _default_plab_sz;
  size_t _desired_net_plab_sz;// Output of filter (below), suitably trimmed and quantized
  AdaptiveWeightedAverage
         _filter;             // Integrator with decay

  virtual void reset() {
    _allocated   = 0;
    _wasted      = 0;
    _undo_wasted = 0;
    _unused      = 0;
  }

  virtual void log_plab_allocation();
  virtual void log_sizing(size_t calculated, size_t net_desired);

  // Helper for adjust_desired_plab_sz().
  virtual size_t compute_desired_plab_sz();

public:
  PLABStats(const char* description, size_t default_per_thread_plab_size, size_t desired_net_plab_sz, unsigned wt) :
    _description(description),
    _allocated(0),
    _wasted(0),
    _undo_wasted(0),
    _unused(0),
    _default_plab_sz(default_per_thread_plab_size),
    _desired_net_plab_sz(desired_net_plab_sz),
    _filter(wt)
  { }

  virtual ~PLABStats() { }

  size_t allocated()   const { return _allocated; }
  size_t wasted()      const { return _wasted; }
  size_t unused()      const { return _unused; }
  size_t used()        const { return allocated() - (wasted() + unused()); }
  size_t undo_wasted() const { return _undo_wasted; }

  static const size_t min_size() {
    return PLAB::min_size();
  }

  static const size_t max_size() {
    return PLAB::max_size();
  }

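  // Illustrative sketch only (not part of this interface): how these statistics
  // are typically driven over a GC cycle. The driver code and the variable names
  // below (stats, plab, num_workers) are assumptions.
  //
  //   // During GC, workers size their PLABs from the shared statistics:
  //   PLAB plab(stats.desired_plab_sz(num_workers));
  //   ...
  //   // At the end of GC, each worker flushes its PLAB's counters:
  //   plab.flush_and_retire_stats(&stats);
  //   // then the desired size is recomputed once and the accumulators cleared:
  //   stats.adjust_desired_plab_sz();
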
  // Calculates the PLAB size for the current number of GC worker threads.
  size_t desired_plab_sz(uint no_of_gc_workers);

  // Updates the current desired PLAB size. Computes the new desired PLAB size with one GC worker thread,
  // updates _desired_net_plab_sz and clears sensor accumulators.
  void adjust_desired_plab_sz();

  inline void add_allocated(size_t v);

  inline void add_unused(size_t v);

  inline void add_wasted(size_t v);

  inline void add_undo_wasted(size_t v);
};

#endif // SHARE_GC_SHARED_PLAB_HPP