/*
 * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionLAB.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"

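// Word size of the int-array header (aligned to the minimum object size) used
// for the filler object that pads out unused LAB space. Set lazily in
// initialize() because the header size depends on compressed oops.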
size_t PSPromotionLAB::filler_header_size;

// This is the shared initialization code. It sets up the basic pointers,
// and allows enough extra space for a filler object. We call a virtual
// method, "lab_is_valid()", to handle the different asserts the old and
// young labs require.
void PSPromotionLAB::initialize(MemRegion lab) {
  assert(lab_is_valid(lab), "Sanity");

  HeapWord* bottom = lab.start();
  HeapWord* end    = lab.end();

  set_bottom(bottom);
  set_end(end);
  set_top(bottom);

  // Initialize after VM starts up because header_size depends on compressed
  // oops.
  filler_header_size = align_object_size(typeArrayOopDesc::header_size(T_INT));

  // We can be initialized to a zero size!
  if (free() > 0) {
    if (ZapUnusedHeapArea) {
      debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
    }

    // NOTE! We need to allow space for a filler object.
    assert(lab.word_size() >= filler_header_size, "lab is too small");
    end = end - filler_header_size;
    set_end(end);

    _state = needs_flush;
  } else {
    _state = zero_size;
  }

  assert(this->top() <= this->end(), "pointers out of order");
}

// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
void PSPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");

  // If we were initialized to a zero-sized lab, there is
  // nothing to flush.
  if (_state == zero_size)
    return;

  // PLABs never allocate the last filler_header_size words,
  // so they can always fill the remainder with an int array.
  HeapWord* tlab_end = end() + filler_header_size;
  typeArrayOop filler_oop = (typeArrayOop) cast_to_oop(top());
  filler_oop->set_mark(markWord::prototype());
  filler_oop->set_klass(Universe::intArrayKlassObj());
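  // The filler array's payload in words (using the unaligned header size);
  // set_length() below takes the equivalent number of jint elements.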
  const size_t array_length =
    pointer_delta(tlab_end, top()) - typeArrayOopDesc::header_size(T_INT);
  assert((array_length * (HeapWordSize/sizeof(jint))) < (size_t)max_jint, "array too big in PSPromotionLAB");
  filler_oop->set_length((int)(array_length * (HeapWordSize/sizeof(jint))));

#ifdef ASSERT
  // Note that we actually DO NOT want to use the aligned header size!
  HeapWord* elt_words = cast_from_oop<HeapWord*>(filler_oop) + typeArrayOopDesc::header_size(T_INT);
  Copy::fill_to_words(elt_words, array_length, 0xDEAABABE);
#endif

  set_bottom(NULL);
  set_end(NULL);
  set_top(NULL);

  _state = flushed;
}

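// Undo the most recent allocation: if obj lies in this LAB, reset top() back
// to obj (the object is asserted to end exactly at the current top()) and
// return true so the caller knows the space was reclaimed.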
bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
  assert(ParallelScavengeHeap::heap()->is_in(obj), "Object outside heap");

  if (contains(obj)) {
    HeapWord* object_end = obj + obj_size;
    assert(object_end == top(), "Not matching last allocation");

    set_top(obj);
    return true;
  }

  return false;
}

// Fill all remaining lab space with an unreachable object.
// The goal is to leave a contiguous parseable span of objects.
void PSOldPromotionLAB::flush() {
  assert(_state != flushed, "Attempt to flush PLAB twice");
  assert(top() <= end(), "pointers out of order");

  if (_state == zero_size)
    return;

  HeapWord* obj = top();

  PSPromotionLAB::flush();

  assert(_start_array != NULL, "Sanity");

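  // Record the start of the filler object in the object start array so the
  // old generation remains parseable (e.g. when scanning dirty cards).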
  _start_array->allocate_block(obj);
}

#ifdef ASSERT

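// A young lab is valid only if it lies entirely within to-space's used region.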
bool PSYoungPromotionLAB::lab_is_valid(MemRegion lab) {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MutableSpace* to_space = heap->young_gen()->to_space();
  MemRegion used = to_space->used_region();
  if (used.contains(lab)) {
    return true;
  }

  return false;
}

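// An old lab must be covered by the object start array and lie entirely
// within the old generation's used region.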
bool PSOldPromotionLAB::lab_is_valid(MemRegion lab) {
  assert(_start_array->covered_region().contains(lab), "Sanity");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  MemRegion used = old_gen->object_space()->used_region();

  if (used.contains(lab)) {
    return true;
  }

  return false;
}

#endif /* ASSERT */