/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"

ZRelocate::ZRelocate(ZWorkers* workers) :
    _workers(workers) {}

static uintptr_t forwarding_index(ZForwarding* forwarding, uintptr_t from_addr) {
  const uintptr_t from_offset = ZAddress::offset(from_addr);
  return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift();
}

static uintptr_t forwarding_find(ZForwarding* forwarding, uintptr_t from_addr, ZForwardingCursor* cursor) {
  const uintptr_t from_index = forwarding_index(forwarding, from_addr);
  const ZForwardingEntry entry = forwarding->find(from_index, cursor);
  return entry.populated() ? ZAddress::good(entry.to_offset()) : 0;
}
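
// For example (hypothetical values, for illustration only): with a page
// starting at heap offset 0x100000 and an object alignment shift of 3
// (8-byte alignment), an object at offset 0x100040 maps to forwarding index
// (0x100040 - 0x100000) >> 3 == 8. The cursor filled in by find() above is
// handed back to forwarding->insert() in forwarding_insert() below, so the
// insertion can pick up where the lookup left off in the forwarding table.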

static uintptr_t forwarding_insert(ZForwarding* forwarding, uintptr_t from_addr, uintptr_t to_addr, ZForwardingCursor* cursor) {
  const uintptr_t from_index = forwarding_index(forwarding, from_addr);
  const uintptr_t to_offset = ZAddress::offset(to_addr);
  const uintptr_t to_offset_final = forwarding->insert(from_index, to_offset, cursor);
  return ZAddress::good(to_offset_final);
}

static uintptr_t relocate_object_inner(ZForwarding* forwarding, uintptr_t from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t size = ZUtils::object_size(from_addr);
  const uintptr_t to_addr = ZHeap::heap()->alloc_object_for_relocation(size);
  if (to_addr == 0) {
    // Allocation failed
    return 0;
  }

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding
  const uintptr_t to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor);
  if (to_addr_final != to_addr) {
    // Already relocated, try to undo the allocation
    ZHeap::heap()->undo_alloc_object_for_relocation(to_addr, size);
  }

  return to_addr_final;
}

uintptr_t ZRelocate::relocate_object(ZForwarding* forwarding, uintptr_t from_addr) const {
  ZForwardingCursor cursor;

  // Lookup forwarding
  uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor);
  if (to_addr != 0) {
    // Already relocated
    return to_addr;
  }

  // Relocate object
  if (forwarding->retain_page()) {
    to_addr = relocate_object_inner(forwarding, from_addr, &cursor);
    forwarding->release_page();

    if (to_addr != 0) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Wait for a worker thread to complete
    // relocation of this page, and then forward the object. If the GC
    // aborts the relocation phase before the page has been relocated,
    // then the wait returns false and we just forward the object in-place.
    if (!forwarding->wait_page_released()) {
      // Forward object in-place
      return forwarding_insert(forwarding, from_addr, from_addr, &cursor);
    }
  }

  // Forward object
  return forward_object(forwarding, from_addr);
}

uintptr_t ZRelocate::forward_object(ZForwarding* forwarding, uintptr_t from_addr) const {
  ZForwardingCursor cursor;
  const uintptr_t to_addr = forwarding_find(forwarding, from_addr, &cursor);
  assert(to_addr != 0, "Should be forwarded");
  return to_addr;
}

static ZPage* alloc_page(const ZForwarding* forwarding) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return NULL;
  }

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_worker_relocation();
  return ZHeap::heap()->alloc_page(forwarding->type(), forwarding->size(), flags);
}

static void free_page(ZPage* page) {
  ZHeap::heap()->free_page(page, true /* reclaimed */);
}

static bool should_free_target_page(ZPage* page) {
  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  return page != NULL && page->top() == page->start();
}
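
// The two allocator classes below provide the target-page management used by
// ZRelocateClosure (further down): alloc_target_page(), share_target_page(),
// free_target_page(), free_relocated_page(), alloc_object(), undo_alloc_object()
// and in_place_count(). A small target page is only ever touched by the worker
// that allocated it, so plain bump allocation is enough. A medium target page
// is published in _shared and may be bump-allocated by several workers
// concurrently, which is why the medium allocator hands out target pages under
// a lock and uses the atomic alloc/undo variants.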

class ZRelocateSmallAllocator {
private:
  volatile size_t _in_place_count;

public:
  ZRelocateSmallAllocator() :
      _in_place_count(0) {}

  ZPage* alloc_target_page(ZForwarding* forwarding, ZPage* target) {
    ZPage* const page = alloc_page(forwarding);
    if (page == NULL) {
      Atomic::inc(&_in_place_count);
    }

    return page;
  }

  void share_target_page(ZPage* page) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (should_free_target_page(page)) {
      free_page(page);
    }
  }

  void free_relocated_page(ZPage* page) {
    free_page(page);
  }

  uintptr_t alloc_object(ZPage* page, size_t size) const {
    return (page != NULL) ? page->alloc_object(size) : 0;
  }

  void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

class ZRelocateMediumAllocator {
private:
  ZConditionLock  _lock;
  ZPage*          _shared;
  bool            _in_place;
  volatile size_t _in_place_count;

public:
  ZRelocateMediumAllocator() :
      _lock(),
      _shared(NULL),
      _in_place(false),
      _in_place_count(0) {}

  ~ZRelocateMediumAllocator() {
    if (should_free_target_page(_shared)) {
      free_page(_shared);
    }
  }

  ZPage* alloc_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    if (_shared == target) {
      _shared = alloc_page(forwarding);
      if (_shared == NULL) {
        Atomic::inc(&_in_place_count);
        _in_place = true;
      }
    }

    return _shared;
  }

  void share_target_page(ZPage* page) {
    ZLocker<ZConditionLock> locker(&_lock);

    assert(_in_place, "Invalid state");
    assert(_shared == NULL, "Invalid state");
    assert(page != NULL, "Invalid page");

    _shared = page;
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  void free_relocated_page(ZPage* page) {
    free_page(page);
  }

  uintptr_t alloc_object(ZPage* page, size_t size) const {
    return (page != NULL) ? page->alloc_object_atomic(size) : 0;
  }

  void undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};
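
// Rough sequence of the in-place handover above (a sketch of the interplay
// with ZRelocateClosure below, not additional behavior): when alloc_page()
// fails, alloc_target_page() sets _in_place and returns NULL; the calling
// worker then claims the page it is relocating, relocates it in-place, and
// finally publishes that page through share_target_page(), which clears
// _in_place and wakes any workers blocked in the wait loop above.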

template <typename Allocator>
class ZRelocateClosure : public ObjectClosure {
private:
  Allocator* const _allocator;
  ZForwarding*     _forwarding;
  ZPage*           _target;

  bool relocate_object(uintptr_t from_addr) const {
    ZForwardingCursor cursor;

    // Lookup forwarding
    if (forwarding_find(_forwarding, from_addr, &cursor) != 0) {
      // Already relocated
      return true;
    }

    // Allocate object
    const size_t size = ZUtils::object_size(from_addr);
    const uintptr_t to_addr = _allocator->alloc_object(_target, size);
    if (to_addr == 0) {
      // Allocation failed
      return false;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place() && to_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, to_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, to_addr, size);
    }

    // Insert forwarding
    if (forwarding_insert(_forwarding, from_addr, to_addr, &cursor) != to_addr) {
      // Already relocated, undo allocation
      _allocator->undo_alloc_object(_target, to_addr, size);
    }

    return true;
  }

  virtual void do_object(oop obj) {
    const uintptr_t addr = ZOop::to_address(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    while (!relocate_object(addr)) {
      // Allocate a new target page, or if that fails, use the page being
      // relocated as the new target, which will cause it to be relocated
      // in-place.
      _target = _allocator->alloc_target_page(_forwarding, _target);
      if (_target != NULL) {
        continue;
      }

      // Claim the page being relocated to block other threads from accessing
      // it, or its forwarding table, until it has been released (relocation
      // completed).
      _target = _forwarding->claim_page();
      _target->reset_for_in_place_relocation();
      _forwarding->set_in_place();
    }
  }

public:
  ZRelocateClosure(Allocator* allocator) :
      _allocator(allocator),
      _forwarding(NULL),
      _target(NULL) {}

  ~ZRelocateClosure() {
    _allocator->free_target_page(_target);
  }

  void do_forwarding(ZForwarding* forwarding) {
    _forwarding = forwarding;

    // Check if we should abort
    if (ZAbort::should_abort()) {
      _forwarding->abort_page();
      return;
    }

    // Relocate objects
    _forwarding->object_iterate(this);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    // Release relocated page
    _forwarding->release_page();

    if (_forwarding->in_place()) {
      // The relocated page has been relocated in-place and should not
      // be freed. Keep it as target page until it is full, and offer to
      // share it with other worker threads.
      _allocator->share_target_page(_target);
    } else {
      // Detach and free relocated page
      ZPage* const page = _forwarding->detach_page();
      _allocator->free_relocated_page(page);
    }
  }
};
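
// ZRelocateTask below only distinguishes small from medium pages: a
// relocation set is expected to contain only small and medium pages (large
// pages are not relocated), so any non-small forwarding is handled by the
// medium closure.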

class ZRelocateTask : public ZTask {
private:
  ZRelocationSetParallelIterator _iter;
  ZRelocateSmallAllocator        _small_allocator;
  ZRelocateMediumAllocator       _medium_allocator;

  static bool is_small(ZForwarding* forwarding) {
    return forwarding->type() == ZPageTypeSmall;
  }

public:
  ZRelocateTask(ZRelocationSet* relocation_set) :
      ZTask("ZRelocateTask"),
      _iter(relocation_set),
      _small_allocator(),
      _medium_allocator() {}

  ~ZRelocateTask() {
    ZStatRelocation::set_at_relocate_end(_small_allocator.in_place_count(),
                                         _medium_allocator.in_place_count());
  }

  virtual void work() {
    ZRelocateClosure<ZRelocateSmallAllocator> small(&_small_allocator);
    ZRelocateClosure<ZRelocateMediumAllocator> medium(&_medium_allocator);

    for (ZForwarding* forwarding; _iter.next(&forwarding);) {
      if (is_small(forwarding)) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }
    }
  }
};

void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  ZRelocateTask task(relocation_set);
  _workers->run(&task);
}
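
// Usage sketch: driving the relocation phase is expected to look roughly like
// the following, where 'workers' and 'relocation_set' are placeholders for
// objects owned by the surrounding GC cycle (an illustration of this file's
// interface only):
//
//   ZRelocate relocate(workers);        // bind to the worker thread pool
//   relocate.relocate(&relocation_set); // run ZRelocateTask on all workers
//
// Each worker's invocation of ZRelocateTask::work() then drains the relocation
// set in parallel through ZRelocationSetParallelIterator.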