/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "memory/allocation.hpp" 27 #include "nmt/memflags.hpp" 28 #include "nmt/nmtNativeCallStackStorage.hpp" 29 #include "nmt/vmatree.hpp" 30 #include "runtime/os.hpp" 31 #include "unittest.hpp" 32 33 using Tree = VMATree; 34 using Node = Tree::TreapNode; 35 using NCS = NativeCallStackStorage; 36 37 class NMTVMATreeTest : public testing::Test { 38 public: 39 NCS ncs; 40 constexpr static const int si_len = 2; 41 NCS::StackIndex si[si_len]; 42 NativeCallStack stacks[si_len]; 43 44 NMTVMATreeTest() : ncs(true) { 45 stacks[0] = make_stack(0xA); 46 stacks[1] = make_stack(0xB); 47 si[0] = ncs.push(stacks[0]); 48 si[1] = ncs.push(stacks[0]); 49 } 50 51 // Utilities 52 53 VMATree::TreapNode* treap_root(VMATree& tree) { 54 return tree._tree._root; 55 } 56 57 VMATree::VMATreap& treap(VMATree& tree) { 58 return tree._tree; 59 } 60 61 VMATree::TreapNode* find(VMATree::VMATreap& treap, const VMATree::position key) { 62 return treap.find(treap._root, key); 63 } 64 65 NativeCallStack make_stack(size_t a) { 66 NativeCallStack stack((address*)&a, 1); 67 return stack; 68 } 69 70 VMATree::StateType in_type_of(VMATree::TreapNode* x) { 71 return x->val().in.type(); 72 } 73 74 VMATree::StateType out_type_of(VMATree::TreapNode* x) { 75 return x->val().out.type(); 76 } 77 78 int count_nodes(Tree& tree) { 79 int count = 0; 80 treap(tree).visit_in_order([&](Node* x) { 81 ++count; 82 }); 83 return count; 84 } 85 86 // Tests 87 // Adjacent reservations are merged if the properties match. 88 void adjacent_2_nodes(const VMATree::RegionData& rd) { 89 Tree tree; 90 for (int i = 0; i < 10; i++) { 91 tree.reserve_mapping(i * 100, 100, rd); 92 } 93 EXPECT_EQ(2, count_nodes(tree)); 94 95 // Reserving the exact same space again should result in still having only 2 nodes 96 for (int i = 0; i < 10; i++) { 97 tree.reserve_mapping(i * 100, 100, rd); 98 } 99 EXPECT_EQ(2, count_nodes(tree)); 100 101 // Do it backwards instead. 
102 Tree tree2; 103 for (int i = 9; i >= 0; i--) { 104 tree2.reserve_mapping(i * 100, 100, rd); 105 } 106 EXPECT_EQ(2, count_nodes(tree2)); 107 } 108 109 // After removing all ranges we should be left with an entirely empty tree 110 void remove_all_leaves_empty_tree(const VMATree::RegionData& rd) { 111 Tree tree; 112 tree.reserve_mapping(0, 100 * 10, rd); 113 for (int i = 0; i < 10; i++) { 114 tree.release_mapping(i * 100, 100); 115 } 116 EXPECT_EQ(nullptr, treap_root(tree)); 117 118 // Other way around 119 tree.reserve_mapping(0, 100 * 10, rd); 120 for (int i = 9; i >= 0; i--) { 121 tree.release_mapping(i * 100, 100); 122 } 123 EXPECT_EQ(nullptr, treap_root(tree)); 124 } 125 126 // Committing in a whole reserved range results in 2 nodes 127 void commit_whole(const VMATree::RegionData& rd) { 128 Tree tree; 129 tree.reserve_mapping(0, 100 * 10, rd); 130 for (int i = 0; i < 10; i++) { 131 tree.commit_mapping(i * 100, 100, rd); 132 } 133 treap(tree).visit_in_order([&](Node* x) { 134 VMATree::StateType in = in_type_of(x); 135 VMATree::StateType out = out_type_of(x); 136 EXPECT_TRUE((in == VMATree::StateType::Released && out == VMATree::StateType::Committed) || 137 (in == VMATree::StateType::Committed && out == VMATree::StateType::Released)); 138 }); 139 EXPECT_EQ(2, count_nodes(tree)); 140 } 141 142 // Committing in middle of reservation ends with a sequence of 4 nodes 143 void commit_middle(const VMATree::RegionData& rd) { 144 Tree tree; 145 tree.reserve_mapping(0, 100, rd); 146 tree.commit_mapping(50, 25, rd); 147 148 size_t found[16]; 149 size_t wanted[4] = {0, 50, 75, 100}; 150 auto exists = [&](size_t x) { 151 for (int i = 0; i < 4; i++) { 152 if (wanted[i] == x) return true; 153 } 154 return false; 155 }; 156 157 int i = 0; 158 treap(tree).visit_in_order([&](Node* x) { 159 if (i < 16) { 160 found[i] = x->key(); 161 } 162 i++; 163 }); 164 165 ASSERT_EQ(4, i) << "0 - 50 - 75 - 100 nodes expected"; 166 EXPECT_TRUE(exists(found[0])); 167 
EXPECT_TRUE(exists(found[1])); 168 EXPECT_TRUE(exists(found[2])); 169 EXPECT_TRUE(exists(found[3])); 170 }; 171 }; 172 173 174 175 TEST_VM_F(NMTVMATreeTest, OverlappingReservationsResultInTwoNodes) { 176 VMATree::RegionData rd{si[0], mtTest}; 177 Tree tree; 178 for (int i = 99; i >= 0; i--) { 179 tree.reserve_mapping(i * 100, 101, rd); 180 } 181 EXPECT_EQ(2, count_nodes(tree)); 182 } 183 184 // Low-level tests inspecting the state of the tree. 185 TEST_VM_F(NMTVMATreeTest, LowLevel) { 186 adjacent_2_nodes(VMATree::empty_regiondata); 187 remove_all_leaves_empty_tree(VMATree::empty_regiondata); 188 commit_middle(VMATree::empty_regiondata); 189 commit_whole(VMATree::empty_regiondata); 190 191 VMATree::RegionData rd{si[0], mtTest }; 192 adjacent_2_nodes(rd); 193 remove_all_leaves_empty_tree(rd); 194 commit_middle(rd); 195 commit_whole(rd); 196 197 { // Identical operation but different metadata should not merge 198 Tree tree; 199 VMATree::RegionData rd{si[0], mtTest }; 200 VMATree::RegionData rd2{si[1], mtNMT }; 201 tree.reserve_mapping(0, 100, rd); 202 tree.reserve_mapping(100, 100, rd2); 203 204 EXPECT_EQ(3, count_nodes(tree)); 205 int found_nodes = 0; 206 } 207 208 { // Reserving after commit should overwrite commit 209 Tree tree; 210 VMATree::RegionData rd{si[0], mtTest }; 211 VMATree::RegionData rd2{si[1], mtNMT }; 212 tree.commit_mapping(50, 50, rd2); 213 tree.reserve_mapping(0, 100, rd); 214 treap(tree).visit_in_order([&](Node* x) { 215 EXPECT_TRUE(x->key() == 0 || x->key() == 100); 216 if (x->key() == 0) { 217 EXPECT_EQ(x->val().out.regiondata().flag, mtTest); 218 } 219 }); 220 221 EXPECT_EQ(2, count_nodes(tree)); 222 } 223 224 { // Split a reserved region into two different reserved regions 225 Tree tree; 226 VMATree::RegionData rd{si[0], mtTest }; 227 VMATree::RegionData rd2{si[1], mtNMT }; 228 VMATree::RegionData rd3{si[0], mtNone }; 229 tree.reserve_mapping(0, 100, rd); 230 tree.reserve_mapping(0, 50, rd2); 231 tree.reserve_mapping(50, 50, rd3); 232 233 
EXPECT_EQ(3, count_nodes(tree)); 234 } 235 { // One big reserve + release leaves an empty tree 236 Tree::RegionData rd{si[0], mtNMT}; 237 Tree tree; 238 tree.reserve_mapping(0, 500000, rd); 239 tree.release_mapping(0, 500000); 240 241 EXPECT_EQ(nullptr, treap_root(tree)); 242 } 243 { // A committed region inside of/replacing a reserved region 244 // should replace the reserved region's metadata. 245 Tree::RegionData rd{si[0], mtNMT}; 246 VMATree::RegionData rd2{si[1], mtTest}; 247 Tree tree; 248 tree.reserve_mapping(0, 100, rd); 249 tree.commit_mapping(0, 100, rd2); 250 treap(tree).visit_range_in_order(0, 99999, [&](Node* x) { 251 if (x->key() == 0) { 252 EXPECT_EQ(mtTest, x->val().out.regiondata().flag); 253 } 254 if (x->key() == 100) { 255 EXPECT_EQ(mtTest, x->val().in.regiondata().flag); 256 } 257 }); 258 } 259 260 { // Attempting to reserve or commit an empty region should not change the tree. 261 Tree tree; 262 Tree::RegionData rd{si[0], mtNMT}; 263 tree.reserve_mapping(0, 0, rd); 264 EXPECT_EQ(nullptr, treap_root(tree)); 265 tree.commit_mapping(0, 0, rd); 266 EXPECT_EQ(nullptr, treap_root(tree)); 267 } 268 } 269 270 // Tests for summary accounting 271 TEST_VM_F(NMTVMATreeTest, SummaryAccounting) { 272 { // Fully enclosed re-reserving works correctly. 
273 Tree::RegionData rd(NCS::StackIndex(), mtTest); 274 Tree::RegionData rd2(NCS::StackIndex(), mtNMT); 275 Tree tree; 276 VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd); 277 VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 278 EXPECT_EQ(100, diff.reserve); 279 all_diff = tree.reserve_mapping(50, 25, rd2); 280 diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 281 VMATree::SingleDiff diff2 = all_diff.flag[NMTUtil::flag_to_index(mtNMT)]; 282 EXPECT_EQ(-25, diff.reserve); 283 EXPECT_EQ(25, diff2.reserve); 284 } 285 { // Fully release reserved mapping 286 Tree::RegionData rd(NCS::StackIndex(), mtTest); 287 Tree tree; 288 VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd); 289 VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 290 EXPECT_EQ(100, diff.reserve); 291 all_diff = tree.release_mapping(0, 100); 292 diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 293 EXPECT_EQ(-100, diff.reserve); 294 } 295 { // Convert some of a released mapping to a committed one 296 Tree::RegionData rd(NCS::StackIndex(), mtTest); 297 Tree tree; 298 VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd); 299 VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 300 EXPECT_EQ(diff.reserve, 100); 301 all_diff = tree.commit_mapping(0, 100, rd); 302 diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 303 EXPECT_EQ(0, diff.reserve); 304 EXPECT_EQ(100, diff.commit); 305 } 306 { // Adjacent reserved mappings with same flag 307 Tree::RegionData rd(NCS::StackIndex(), mtTest); 308 Tree tree; 309 VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd); 310 VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 311 EXPECT_EQ(diff.reserve, 100); 312 all_diff = tree.reserve_mapping(100, 100, rd); 313 diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 314 EXPECT_EQ(100, diff.reserve); 315 } 316 { // Adjacent reserved mappings with different flags 317 
Tree::RegionData rd(NCS::StackIndex(), mtTest); 318 Tree::RegionData rd2(NCS::StackIndex(), mtNMT); 319 Tree tree; 320 VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd); 321 VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 322 EXPECT_EQ(diff.reserve, 100); 323 all_diff = tree.reserve_mapping(100, 100, rd2); 324 diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)]; 325 EXPECT_EQ(0, diff.reserve); 326 diff = all_diff.flag[NMTUtil::flag_to_index(mtNMT)]; 327 EXPECT_EQ(100, diff.reserve); 328 } 329 330 { // A commit with two previous commits inside of it should only register 331 // the new memory in the commit diff. 332 Tree tree; 333 Tree::RegionData rd(NCS::StackIndex(), mtTest); 334 tree.commit_mapping(128, 128, rd); 335 tree.commit_mapping(512, 128, rd); 336 VMATree::SummaryDiff diff = tree.commit_mapping(0, 1024, rd); 337 EXPECT_EQ(768, diff.flag[NMTUtil::flag_to_index(mtTest)].commit); 338 EXPECT_EQ(768, diff.flag[NMTUtil::flag_to_index(mtTest)].reserve); 339 } 340 } 341 342 // Exceedingly simple tracker for page-granular allocations 343 // Use it for testing consistency with VMATree. 
344 struct SimpleVMATracker : public CHeapObj<mtTest> { 345 const size_t page_size = 4096; 346 enum Type { Reserved, Committed, Free }; 347 struct Info { 348 Type type; 349 MEMFLAGS flag; 350 NativeCallStack stack; 351 Info() : type(Free), flag(mtNone), stack() {} 352 353 Info(Type type, NativeCallStack stack, MEMFLAGS flag) 354 : type(type), flag(flag), stack(stack) {} 355 356 bool eq(Info other) { 357 return flag == other.flag && stack.equals(other.stack); 358 } 359 }; 360 // Page (4KiB) granular array 361 static constexpr const size_t num_pages = 1024 * 4; 362 Info pages[num_pages]; 363 364 SimpleVMATracker() 365 : pages() { 366 for (size_t i = 0; i < num_pages; i++) { 367 pages[i] = Info(); 368 } 369 } 370 371 VMATree::SummaryDiff do_it(Type type, size_t start, size_t size, NativeCallStack stack, MEMFLAGS flag) { 372 assert(is_aligned(size, page_size) && is_aligned(start, page_size), "page alignment"); 373 374 VMATree::SummaryDiff diff; 375 const size_t page_count = size / page_size; 376 const size_t start_idx = start / page_size; 377 const size_t end_idx = start_idx + page_count; 378 assert(end_idx < SimpleVMATracker::num_pages, ""); 379 380 Info new_info(type, stack, flag); 381 for (size_t i = start_idx; i < end_idx; i++) { 382 Info& old_info = pages[i]; 383 384 // Register diff 385 if (old_info.type == Reserved) { 386 diff.flag[(int)old_info.flag].reserve -= page_size; 387 } else if (old_info.type == Committed) { 388 diff.flag[(int)old_info.flag].reserve -= page_size; 389 diff.flag[(int)old_info.flag].commit -= page_size; 390 } 391 392 if (type == Reserved) { 393 diff.flag[(int)new_info.flag].reserve += page_size; 394 } else if(type == Committed) { 395 diff.flag[(int)new_info.flag].reserve += page_size; 396 diff.flag[(int)new_info.flag].commit += page_size; 397 } 398 // Overwrite old one with new 399 pages[i] = new_info; 400 } 401 return diff; 402 } 403 404 VMATree::SummaryDiff reserve(size_t start, size_t size, NativeCallStack stack, MEMFLAGS flag) { 405 
return do_it(Reserved, start, size, stack, flag); 406 } 407 408 VMATree::SummaryDiff commit(size_t start, size_t size, NativeCallStack stack, MEMFLAGS flag) { 409 return do_it(Committed, start, size, stack, flag); 410 } 411 412 VMATree::SummaryDiff release(size_t start, size_t size) { 413 return do_it(Free, start, size, NativeCallStack(), mtNone); 414 } 415 }; 416 417 constexpr const size_t SimpleVMATracker::num_pages; 418 419 TEST_VM_F(NMTVMATreeTest, TestConsistencyWithSimpleTracker) { 420 // In this test we use ASSERT macros from gtest instead of EXPECT 421 // as any error will propagate and become larger as the test progresses. 422 SimpleVMATracker* tr = new SimpleVMATracker(); 423 const size_t page_size = tr->page_size; 424 VMATree tree; 425 NCS ncss(true); 426 constexpr const int candidates_len_flags = 4; 427 constexpr const int candidates_len_stacks = 2; 428 429 NativeCallStack candidate_stacks[candidates_len_stacks] = { 430 make_stack(0xA), 431 make_stack(0xB), 432 }; 433 434 const MEMFLAGS candidate_flags[candidates_len_flags] = { 435 mtNMT, 436 mtTest, 437 }; 438 439 const int operation_count = 100000; // One hundred thousand 440 for (int i = 0; i < operation_count; i++) { 441 size_t page_start = (size_t)(os::random() % SimpleVMATracker::num_pages); 442 size_t page_end = (size_t)(os::random() % (SimpleVMATracker::num_pages)); 443 444 if (page_end < page_start) { 445 const size_t temp = page_start; 446 page_start = page_end; 447 page_end = page_start; 448 } 449 const size_t num_pages = page_end - page_start; 450 451 if (num_pages == 0) { 452 i--; continue; 453 } 454 455 const size_t start = page_start * page_size; 456 const size_t size = num_pages * page_size; 457 458 const MEMFLAGS flag = candidate_flags[os::random() % candidates_len_flags]; 459 const NativeCallStack stack = candidate_stacks[os::random() % candidates_len_stacks]; 460 461 const NCS::StackIndex si = ncss.push(stack); 462 VMATree::RegionData data(si, flag); 463 464 const 
SimpleVMATracker::Type type = (SimpleVMATracker::Type)(os::random() % 3); 465 466 VMATree::SummaryDiff tree_diff; 467 VMATree::SummaryDiff simple_diff; 468 if (type == SimpleVMATracker::Reserved) { 469 simple_diff = tr->reserve(start, size, stack, flag); 470 tree_diff = tree.reserve_mapping(start, size, data); 471 } else if (type == SimpleVMATracker::Committed) { 472 simple_diff = tr->commit(start, size, stack, flag); 473 tree_diff = tree.commit_mapping(start, size, data); 474 } else { 475 simple_diff = tr->release(start, size); 476 tree_diff = tree.release_mapping(start, size); 477 } 478 479 for (int j = 0; j < mt_number_of_types; j++) { 480 VMATree::SingleDiff td = tree_diff.flag[j]; 481 VMATree::SingleDiff sd = simple_diff.flag[j]; 482 ASSERT_EQ(td.reserve, sd.reserve); 483 ASSERT_EQ(td.commit, sd.commit); 484 } 485 486 487 // Do an in-depth check every 25 000 iterations. 488 if (i % 25000 == 0) { 489 size_t j = 0; 490 while (j < SimpleVMATracker::num_pages) { 491 while (j < SimpleVMATracker::num_pages && 492 tr->pages[j].type == SimpleVMATracker::Free) { 493 j++; 494 } 495 496 if (j == SimpleVMATracker::num_pages) { 497 break; 498 } 499 500 size_t start = j; 501 SimpleVMATracker::Info starti = tr->pages[start]; 502 503 while (j < SimpleVMATracker::num_pages && 504 tr->pages[j].eq(starti)) { 505 j++; 506 } 507 508 size_t end = j-1; 509 ASSERT_LE(end, SimpleVMATracker::num_pages); 510 SimpleVMATracker::Info endi = tr->pages[end]; 511 512 VMATree::VMATreap& treap = this->treap(tree); 513 VMATree::TreapNode* startn = find(treap, start * page_size); 514 ASSERT_NE(nullptr, startn); 515 VMATree::TreapNode* endn = find(treap, (end * page_size) + page_size); 516 ASSERT_NE(nullptr, endn); 517 518 const NativeCallStack& start_stack = ncss.get(startn->val().out.stack()); 519 const NativeCallStack& end_stack = ncss.get(endn->val().in.stack()); 520 ASSERT_TRUE(starti.stack.equals(start_stack)); 521 ASSERT_TRUE(endi.stack.equals(end_stack)); 522 523 ASSERT_EQ(starti.flag, 
startn->val().out.flag()); 524 ASSERT_EQ(endi.flag, endn->val().in.flag()); 525 } 526 } 527 } 528 }