/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 */ 23 24 #include "precompiled.hpp" 25 #include "gc/shared/gc_globals.hpp" 26 #include "gc/shared/suspendibleThreadSet.hpp" 27 #include "gc/z/zAbort.inline.hpp" 28 #include "gc/z/zAddress.inline.hpp" 29 #include "gc/z/zBarrier.inline.hpp" 30 #include "gc/z/zForwarding.inline.hpp" 31 #include "gc/z/zHeap.inline.hpp" 32 #include "gc/z/zPage.inline.hpp" 33 #include "gc/z/zRelocate.hpp" 34 #include "gc/z/zRelocationSet.inline.hpp" 35 #include "gc/z/zStat.hpp" 36 #include "gc/z/zTask.hpp" 37 #include "gc/z/zThread.inline.hpp" 38 #include "gc/z/zWorkers.hpp" 39 #include "prims/jvmtiTagMap.hpp" 40 #include "runtime/atomic.hpp" 41 #include "utilities/debug.hpp" 42 43 ZRelocate::ZRelocate(ZWorkers* workers) : 44 _workers(workers) {} 45 46 static uintptr_t forwarding_index(ZForwarding* forwarding, uintptr_t from_addr) { 47 const uintptr_t from_offset = ZAddress::offset(from_addr); 48 return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift(); 49 } 50 51 static uintptr_t forwarding_find(ZForwarding* forwarding, uintptr_t from_addr, ZForwardingCursor* cursor) { 52 const uintptr_t from_index = forwarding_index(forwarding, from_addr); 53 const ZForwardingEntry entry = forwarding->find(from_index, cursor); 54 return entry.populated() ? 
ZAddress::good(entry.to_offset()) : 0; 55 } 56 57 static uintptr_t forwarding_insert(ZForwarding* forwarding, uintptr_t from_addr, uintptr_t to_addr, ZForwardingCursor* cursor) { 58 const uintptr_t from_index = forwarding_index(forwarding, from_addr); 59 const uintptr_t to_offset = ZAddress::offset(to_addr); 60 const uintptr_t to_offset_final = forwarding->insert(from_index, to_offset, cursor); 61 return ZAddress::good(to_offset_final); 62 } 63 64 static uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_addr, ZForwardingCursor* cursor) { 65 assert(ZHeap::heap()->is_object_live(from_addr), "Should be live"); 66 67 // Allocate object 68 const size_t size = ZUtils::object_size(from_addr); 69 const uintptr_t to_addr = ZHeap::heap()->alloc_object_for_relocation(size); 70 if (to_addr == 0) { 71 // Allocation failed 72 return 0; 73 } 74 75 // Copy object 76 ZUtils::object_copy_disjoint(from_addr, to_addr, size); 77 78 // Insert forwarding 79 const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor); 80 if (to_addr_final != to_addr) { 81 // Already relocated, try undo allocation 82 ZHeap::heap()->undo_alloc_object_for_relocation(to_addr, size); 83 } 84 85 return to_addr_final; 86 } 87 88 uintptr_t ZRelocate::relocate_object(ZForwarding* forwarding, uintptr_t from_addr) const { 89 ZForwardingCursor cursor; 90 91 // Lookup forwarding 92 uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor); 93 if (to_addr != 0) { 94 // Already relocated 95 return to_addr; 96 } 97 98 // Relocate object 99 if (forwarding->retain_page()) { 100 to_addr = relocate_object_inner(forwarding, from_addr, &cursor); 101 forwarding->release_page(); 102 103 if (to_addr != 0) { 104 // Success 105 return to_addr; 106 } 107 108 // Failed to relocate object. Wait for a worker thread to complete 109 // relocation of this page, and then forward the object. 
If the GC 110 // aborts the relocation phase before the page has been relocated, 111 // then wait return false and we just forward the object in-place. 112 if (!forwarding->wait_page_released()) { 113 // Forward object in-place 114 return forwarding_insert(forwarding, from_addr, from_addr, &cursor); 115 } 116 } 117 118 // Forward object 119 return forward_object(forwarding, from_addr); 120 } 121 122 uintptr_t ZRelocate::forward_object(ZForwarding* forwarding, uintptr_t from_addr) const { 123 ZForwardingCursor cursor; 124 const uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor); 125 assert(to_addr != 0, "Should be forwarded"); 126 return to_addr; 127 } 128 129 static ZPage* alloc_page(const ZForwarding* forwarding) { 130 if (ZStressRelocateInPlace) { 131 // Simulate failure to allocate a new page. This will 132 // cause the page being relocated to be relocated in-place. 133 return NULL; 134 } 135 136 ZAllocationFlags flags; 137 flags.set_non_blocking(); 138 flags.set_worker_relocation(); 139 return ZHeap::heap()->alloc_page(forwarding->type(), forwarding->size(), flags); 140 } 141 142 static void free_page(ZPage* page) { 143 ZHeap::heap()->free_page(page, true /* reclaimed */); 144 } 145 146 static bool should_free_target_page(ZPage* page) { 147 // Free target page if it is empty. We can end up with an empty target 148 // page if we allocated a new target page, and then lost the race to 149 // relocate the remaining objects, leaving the target page empty when 150 // relocation completed. 
151 return page != NULL && page->top() == page->start(); 152 } 153 154 class ZRelocateSmallAllocator { 155 private: 156 volatile size_t _in_place_count; 157 158 public: 159 ZRelocateSmallAllocator() : 160 _in_place_count(0) {} 161 162 ZPage* alloc_target_page(ZForwarding* forwarding, ZPage* target) { 163 ZPage* const page = alloc_page(forwarding); 164 if (page == NULL) { 165 Atomic::inc(&_in_place_count); 166 } 167 168 return page; 169 } 170 171 void share_target_page(ZPage* page) { 172 // Does nothing 173 } 174 175 void free_target_page(ZPage* page) { 176 if (should_free_target_page(page)) { 177 free_page(page); 178 } 179 } 180 181 void free_relocated_page(ZPage* page) { 182 free_page(page); 183 } 184 185 uintptr_t alloc_object(ZPage* page, size_t size) const { 186 return (page != NULL) ? page->alloc_object(size) : 0; 187 } 188 189 void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) const { 190 page->undo_alloc_object(addr, size); 191 } 192 193 const size_t in_place_count() const { 194 return _in_place_count; 195 } 196 }; 197 198 class ZRelocateMediumAllocator { 199 private: 200 ZConditionLock _lock; 201 ZPage* _shared; 202 bool _in_place; 203 volatile size_t _in_place_count; 204 205 public: 206 ZRelocateMediumAllocator() : 207 _lock(), 208 _shared(NULL), 209 _in_place(false), 210 _in_place_count(0) {} 211 212 ~ZRelocateMediumAllocator() { 213 if (should_free_target_page(_shared)) { 214 free_page(_shared); 215 } 216 } 217 218 ZPage* alloc_target_page(ZForwarding* forwarding, ZPage* target) { 219 ZLocker<ZConditionLock> locker(&_lock); 220 221 // Wait for any ongoing in-place relocation to complete 222 while (_in_place) { 223 _lock.wait(); 224 } 225 226 // Allocate a new page only if the shared page is the same as the 227 // current target page. The shared page will be different from the 228 // current target page if another thread shared a page, or allocated 229 // a new page. 
230 if (_shared == target) { 231 _shared = alloc_page(forwarding); 232 if (_shared == NULL) { 233 Atomic::inc(&_in_place_count); 234 _in_place = true; 235 } 236 } 237 238 return _shared; 239 } 240 241 void share_target_page(ZPage* page) { 242 ZLocker<ZConditionLock> locker(&_lock); 243 244 assert(_in_place, "Invalid state"); 245 assert(_shared == NULL, "Invalid state"); 246 assert(page != NULL, "Invalid page"); 247 248 _shared = page; 249 _in_place = false; 250 251 _lock.notify_all(); 252 } 253 254 void free_target_page(ZPage* page) { 255 // Does nothing 256 } 257 258 void free_relocated_page(ZPage* page) { 259 free_page(page); 260 } 261 262 uintptr_t alloc_object(ZPage* page, size_t size) const { 263 return (page != NULL) ? page->alloc_object_atomic(size) : 0; 264 } 265 266 void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) const { 267 page->undo_alloc_object_atomic(addr, size); 268 } 269 270 const size_t in_place_count() const { 271 return _in_place_count; 272 } 273 }; 274 275 template <typename Allocator> 276 class ZRelocateClosure : public ObjectClosure { 277 private: 278 Allocator* const _allocator; 279 ZForwarding* _forwarding; 280 ZPage* _target; 281 282 bool relocate_object(uintptr_t from_addr) const { 283 ZForwardingCursor cursor; 284 285 // Lookup forwarding 286 if (forwarding_find(_forwarding, from_addr, &cursor) != 0) { 287 // Already relocated 288 return true; 289 } 290 291 // Allocate object 292 const size_t size = ZUtils::object_size(from_addr); 293 const uintptr_t to_addr = _allocator->alloc_object(_target, size); 294 if (to_addr == 0) { 295 // Allocation failed 296 return false; 297 } 298 299 // Copy object. Use conjoint copying if we are relocating 300 // in-place and the new object overlapps with the old object. 
301 if (_forwarding->in_place() && to_addr + size > from_addr) { 302 ZUtils::object_copy_conjoint(from_addr, to_addr, size); 303 } else { 304 ZUtils::object_copy_disjoint(from_addr, to_addr, size); 305 } 306 307 // Insert forwarding 308 if (forwarding_insert(_forwarding, from_addr, to_addr, &cursor) != to_addr) { 309 // Already relocated, undo allocation 310 _allocator->undo_alloc_object(_target, to_addr, size); 311 } 312 313 return true; 314 } 315 316 virtual void do_object(oop obj) { 317 const uintptr_t addr = ZOop::to_address(obj); 318 assert(ZHeap::heap()->is_object_live(addr), "Should be live"); 319 320 while (!relocate_object(addr)) { 321 // Allocate a new target page, or if that fails, use the page being 322 // relocated as the new target, which will cause it to be relocated 323 // in-place. 324 _target = _allocator->alloc_target_page(_forwarding, _target); 325 if (_target != NULL) { 326 continue; 327 } 328 329 // Claim the page being relocated to block other threads from accessing 330 // it, or its forwarding table, until it has been released (relocation 331 // completed). 
332 _target = _forwarding->claim_page(); 333 _target->reset_for_in_place_relocation(); 334 _forwarding->set_in_place(); 335 } 336 337 if (SuspendibleThreadSet::should_yield()) { 338 SuspendibleThreadSet::yield(); 339 } 340 } 341 342 public: 343 ZRelocateClosure(Allocator* allocator) : 344 _allocator(allocator), 345 _forwarding(NULL), 346 _target(NULL) {} 347 348 ~ZRelocateClosure() { 349 _allocator->free_target_page(_target); 350 } 351 352 void do_forwarding(ZForwarding* forwarding) { 353 _forwarding = forwarding; 354 355 // Check if we should abort 356 if (ZAbort::should_abort()) { 357 _forwarding->abort_page(); 358 return; 359 } 360 361 // Relocate objects 362 _forwarding->object_iterate(this); 363 364 // Verify 365 if (ZVerifyForwarding) { 366 _forwarding->verify(); 367 } 368 369 // Release relocated page 370 _forwarding->release_page(); 371 372 if (_forwarding->in_place()) { 373 // The relocated page has been relocated in-place and should not 374 // be freed. Keep it as target page until it is full, and offer to 375 // share it with other worker threads. 
376 _allocator->share_target_page(_target); 377 } else { 378 // Detach and free relocated page 379 ZPage* const page = _forwarding->detach_page(); 380 _allocator->free_relocated_page(page); 381 } 382 } 383 }; 384 385 class ZRelocateTask : public ZTask { 386 private: 387 ZRelocationSetParallelIterator _iter; 388 ZRelocateSmallAllocator _small_allocator; 389 ZRelocateMediumAllocator _medium_allocator; 390 391 static bool is_small(ZForwarding* forwarding) { 392 return forwarding->type() == ZPageTypeSmall; 393 } 394 395 public: 396 ZRelocateTask(ZRelocationSet* relocation_set) : 397 ZTask("ZRelocateTask"), 398 _iter(relocation_set), 399 _small_allocator(), 400 _medium_allocator() {} 401 402 ~ZRelocateTask() { 403 ZStatRelocation::set_at_relocate_end(_small_allocator.in_place_count(), 404 _medium_allocator.in_place_count()); 405 } 406 407 virtual void work() { 408 ZRelocateClosure<ZRelocateSmallAllocator> small(&_small_allocator); 409 ZRelocateClosure<ZRelocateMediumAllocator> medium(&_medium_allocator); 410 411 SuspendibleThreadSetJoiner sts_joiner; 412 for (ZForwarding* forwarding; _iter.next(&forwarding);) { 413 if (is_small(forwarding)) { 414 small.do_forwarding(forwarding); 415 } else { 416 medium.do_forwarding(forwarding); 417 } 418 } 419 } 420 }; 421 422 void ZRelocate::relocate(ZRelocationSet* relocation_set) { 423 ZRelocateTask task(relocation_set); 424 _workers->run(&task); 425 } --- EOF ---