/*
 * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionLAB.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"

// Word size reserved at the end of every LAB so that flush() can always
// install an int-array filler header over the unused tail.  Set in
// initialize() rather than statically because the header size depends on
// runtime flags (compressed oops / compact object headers).
size_t PSPromotionLAB::filler_header_size;

// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()" to handle the different asserts the old/young
// labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end    = lab.end();

  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // Initialize after VM starts up because header_size depends on compressed
  // oops.  The "+ BytesPerWord" reserves a full extra word before the
  // word-division, so the filler int-array header (mark word, klass info and
  // length field) always fits in the reserved tail.
  filler_header_size = align_object_size((arrayOopDesc::base_offset_in_bytes(T_INT) + BytesPerWord) / BytesPerWord);

  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      // Debug builds only: fill the unused LAB with a recognizable pattern
      // so stale reads are easy to spot.
      debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
    }

    // NOTE! We need to allow space for a filler object.  Allocation never
    // sees the reserved tail: we pull end() back by filler_header_size so
    // flush() is guaranteed room for the filler array's header.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}

// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
void PSPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");

  // If we were initialized to a zero sized lab, there is
  // nothing to flush
  if (_state == zero_size)
    return;

  // PLAB's never allocate the last aligned_header_size
  // so they can always fill with an array.
  HeapWord* tlab_end = end() + filler_header_size;
  // Stamp an int[] filler object directly over the unused tail [top, tlab_end).
  typeArrayOop filler_oop = (typeArrayOop) cast_to_oop(top());
  if (UseCompactObjectHeaders) {
    // Compact headers: klass information lives in the mark word, so the
    // klass's prototype header is the complete object header.
    filler_oop->set_mark(Universe::intArrayKlassObj()->prototype_header());
  } else {
    // Legacy layout: mark word and klass field are written separately.
    filler_oop->set_mark(markWord::prototype());
    filler_oop->set_klass(Universe::intArrayKlassObj());
  }
  // Element count = remaining bytes after the (unaligned) array header.
  int header_size = arrayOopDesc::base_offset_in_bytes(T_INT);
  const size_t array_length_bytes = pointer_delta(tlab_end, top(), 1) - header_size;
  assert((array_length_bytes / sizeof(jint)) < (size_t)max_jint, "array too big in PSPromotionLAB");
  filler_oop->set_length((int)(array_length_bytes / sizeof(jint)));

#ifdef ASSERT
  // Note that we actually DO NOT want to use the aligned header size!
  // Zap the filler's element area with a distinctive debug pattern.
  const size_t array_length_words = pointer_delta(tlab_end, top()) - heap_word_size(header_size);
  HeapWord* elt_words = cast_from_oop<HeapWord*>(filler_oop) + heap_word_size(header_size);
  Copy::fill_to_words(elt_words, array_length_words, 0xDEAABABE);
#endif

  // Poison the pointers; this LAB must not be used for allocation again.
  set_bottom(NULL);
  set_end(NULL);
  set_top(NULL);

  _state = flushed;
}

// Undo the most recent allocation in this LAB.  Only succeeds if obj is the
// last object allocated (i.e. obj + obj_size == top()); returns false when
// obj is not in this LAB at all.
bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
  assert(ParallelScavengeHeap::heap()->is_in(obj), "Object outside heap");

  if (contains(obj)) {
    HeapWord* object_end = obj + obj_size;
    assert(object_end == top(), "Not matching last allocation");

    set_top(obj);
    return true;
  }

  return false;
}

// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
// The old-gen variant must additionally record the filler in the block
// start array so old-gen heap walks stay parseable.
void PSOldPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");

  if (_state == zero_size)
    return;

  // Capture top() before the base-class flush poisons the pointers;
  // this is where the filler object was installed.
  HeapWord* obj = top();

  PSPromotionLAB::flush();

  assert(_start_array != NULL, "Sanity");

  _start_array->allocate_block(obj);
}

#ifdef ASSERT

// A young LAB is valid only if it lies entirely within to-space's used region.
bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MutableSpace* to_space = heap->young_gen()->to_space();
  MemRegion used = to_space->used_region();
  if (used.contains(lab)) {
    return true;
  }

  return false;
}

// An old LAB is valid only if it lies entirely within the old-gen object
// space's used region (and within the start array's covered region).
bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
  assert(_start_array->covered_region().contains(lab), "Sanity");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  MemRegion used = old_gen->object_space()->used_region();

  if (used.contains(lab)) {
    return true;
  }

  return false;
}

#endif /* ASSERT */