/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahUncommitThread.hpp"
#include "logging/log.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/events.hpp"

ShenandoahUncommitThread::ShenandoahUncommitThread(ShenandoahHeap* heap)
  : _heap(heap),
    _uncommit_lock(Mutex::safepoint - 2, "ShenandoahUncommit_lock", true) {
  set_name("Shenandoah Uncommit Thread");
  create_and_start();

  // Allow uncommits. This is managed by the control thread during a GC.
  _uncommit_allowed.set();
}

void ShenandoahUncommitThread::run_service() {
  assert(ShenandoahUncommit, "Thread should only run when uncommit is enabled");

  // Polling on an interval avoids constantly scanning the regions for shrinking opportunities.
  // An interval 10x shorter than the delay means shrinking starts with a lag of at most
  // 1/10th of the true delay.
  // ShenandoahUncommitDelay is in millis, but shrink_period is in seconds.
  const int64_t poll_interval = int64_t(ShenandoahUncommitDelay) / 10;
  const double shrink_period = double(ShenandoahUncommitDelay) / 1000;
  bool timed_out = false;
  while (!should_terminate()) {
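    // Consume pending notifications; try_unset() clears each flag so a request is handled once.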
    bool soft_max_changed = _soft_max_changed.try_unset();
    bool explicit_gc_requested = _explicit_gc_requested.try_unset();

    if (soft_max_changed || explicit_gc_requested || timed_out) {
      double current = os::elapsedTime();
      size_t shrink_until = soft_max_changed ? _heap->soft_max_capacity() : _heap->min_capacity();
      double shrink_before = (soft_max_changed || explicit_gc_requested) ?
              current :
              current - shrink_period;

      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to the new soft max capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.
      if (should_uncommit(shrink_before, shrink_until)) {
        uncommit(shrink_before, shrink_until);
      }
    }

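    // Wait for the poll interval, or until notified of a soft max change, an explicit GC, or shutdown.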
    if (!should_terminate()) {
      MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
      timed_out = locker.wait(poll_interval);
    }
  }
}

bool ShenandoahUncommitThread::should_uncommit(double shrink_before, size_t shrink_until) const {
  // Only start uncommit if the GC is idle, no GC is trying to run (uncommit is allowed), and there is work to do.
  return _heap->is_idle() && is_uncommit_allowed() && has_work(shrink_before, shrink_until);
}

bool ShenandoahUncommitThread::has_work(double shrink_before, size_t shrink_until) const {
  // Determine whether there is work to do. This avoids taking the heap lock when there is
  // no work available, avoids spamming the log with superfluous messages, and minimizes
  // the amount of work done while locks are held.

  if (_heap->committed() <= shrink_until) {
    return false;
  }

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      return true;
    }
  }

  return false;
}

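// Wakes the service loop when the soft max capacity has changed, so that committed
// memory can be shrunk down to the new soft max capacity.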
void ShenandoahUncommitThread::notify_soft_max_changed() {
  assert(is_uncommit_allowed(), "Only notify if uncommit is allowed");
  if (_soft_max_changed.try_set()) {
    MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
    locker.notify_all();
  }
}

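// Wakes the service loop when an explicit GC has been requested, so that committed
// memory can be shrunk down to the minimum capacity.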
void ShenandoahUncommitThread::notify_explicit_gc_requested() {
  assert(is_uncommit_allowed(), "Only notify if uncommit is allowed");
  if (_explicit_gc_requested.try_set()) {
    MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
    locker.notify_all();
  }
}

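// True unless uncommit has been forbidden (by the control thread during a GC, or at shutdown).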
bool ShenandoahUncommitThread::is_uncommit_allowed() const {
  return _uncommit_allowed.is_set();
}

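// Uncommit regions that have been empty since before shrink_before, until the committed
// size of the heap drops to shrink_until.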
void ShenandoahUncommitThread::uncommit(double shrink_before, size_t shrink_until) {
  assert(ShenandoahUncommit, "should be enabled");
  assert(_uncommit_in_progress.is_unset(), "Uncommit should not be in progress");

  {
    // Final check, under the lock, whether uncommit is still allowed.
    MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
    if (is_uncommit_allowed()) {
      _uncommit_in_progress.set();
    }
  }

  // If not allowed to start, do nothing.
  if (!_uncommit_in_progress.is_set()) {
    return;
  }

  // From here on, uncommit is in progress. Attempts to stop the uncommit must wait
  // until the cancellation request is acknowledged and uncommit is no longer in progress.
  const char* msg = "Concurrent uncommit";
  const double start = os::elapsedTime();
  EventMark em("%s", msg);
  log_info(gc, start)("%s", msg);

  // This is the number of regions uncommitted during this increment of uncommit work.
  const size_t uncommitted_region_count = do_uncommit_work(shrink_before, shrink_until);

  {
    MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
    _uncommit_in_progress.unset();
    locker.notify_all();
  }

  if (uncommitted_region_count > 0) {
    _heap->notify_heap_changed();
  }

  const double elapsed = os::elapsedTime() - start;
  log_info(gc)("%s " PROPERFMT " (" PROPERFMT ") %.3fms",
               msg, PROPERFMTARGS(uncommitted_region_count * ShenandoahHeapRegion::region_size_bytes()), PROPERFMTARGS(_heap->capacity()),
               elapsed * MILLIUNITS);
}

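// Walks the regions from the end of the heap and uncommits empty regions that became empty
// before shrink_before, stopping once committed memory reaches shrink_until or uncommit is
// no longer allowed. Returns the number of regions uncommitted.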
size_t ShenandoahUncommitThread::do_uncommit_work(double shrink_before, size_t shrink_until) const {
  size_t count = 0;
  // The application allocates from the beginning of the heap, while the GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that the application
  // keeps enjoying the already-committed regions near the start. GC allocations are much
  // less frequent, and can therefore absorb the re-commit costs.
  for (size_t i = _heap->num_regions(); i > 0; i--) {
    if (!is_uncommit_allowed()) {
      // GC wants to start, so the uncommit operation must stop
      break;
    }

    ShenandoahHeapRegion* r = _heap->get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      SuspendibleThreadSetJoiner sts_joiner;
      ShenandoahHeapLocker heap_locker(_heap->lock());
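      // Re-check under the heap lock: the region may have been reused since the unlocked check above.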
      if (r->is_empty_committed()) {
        if (_heap->committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          // We have uncommitted enough regions to hit the target heap committed size
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }
  return count;
}

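// Part of the ConcurrentGCThread shutdown protocol: disallow further uncommit and wake the
// service loop so it can observe the termination request.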
void ShenandoahUncommitThread::stop_service() {
  MonitorLocker locker(&_uncommit_lock, Mutex::_safepoint_check_flag);
  _uncommit_allowed.unset();
  locker.notify_all();
}

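// Forbid uncommit, e.g. while the control thread runs a GC cycle; waits until any
// in-progress uncommit has acknowledged the cancellation before returning.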
void ShenandoahUncommitThread::forbid_uncommit() {
  MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
  _uncommit_allowed.unset();
  while (_uncommit_in_progress.is_set()) {
    locker.wait();
  }
}

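// Allow uncommit again, e.g. after the control thread has finished a GC cycle.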
void ShenandoahUncommitThread::allow_uncommit() {
  MonitorLocker locker(&_uncommit_lock, Mutex::_no_safepoint_check_flag);
  _uncommit_allowed.set();
}