/*
 * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#include "unittest.hpp"
#ifdef _WIN32
#include "os_windows.hpp"
#endif

using testing::HasSubstr;

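// Helpers: small_page_size() is the base (system) page size; large_page_size() is the
// largest page size usable for a 4M region, i.e. a large page size if one of at most
// 4M is configured, and the base page size otherwise.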
static size_t small_page_size() {
  return os::vm_page_size();
}

static size_t large_page_size() {
  const size_t large_page_size_example = 4 * M;
  return os::page_size_for_region_aligned(large_page_size_example, 1);
}

TEST_VM(os, page_size_for_region) {
  size_t large_page_example = 4 * M;
  size_t large_page = os::page_size_for_region_aligned(large_page_example, 1);

  size_t small_page = os::vm_page_size();
  if (large_page > small_page) {
    size_t num_small_in_large = large_page / small_page;
    size_t page = os::page_size_for_region_aligned(large_page, num_small_in_large);
    ASSERT_EQ(page, small_page) << "Did not get a small page";
  }
}

TEST_VM(os, page_size_for_region_aligned) {
  if (UseLargePages) {
    const size_t small_page = small_page_size();
    const size_t large_page = large_page_size();

    if (large_page > small_page) {
      size_t num_small_pages_in_large = large_page / small_page;
      size_t page = os::page_size_for_region_aligned(large_page, num_small_pages_in_large);

      ASSERT_EQ(page, small_page);
    }
  }
}

TEST_VM(os, page_size_for_region_alignment) {
  if (UseLargePages) {
    const size_t small_page = small_page_size();
    const size_t large_page = large_page_size();
    if (large_page > small_page) {
      const size_t unaligned_region = large_page + 17;
      size_t page = os::page_size_for_region_aligned(unaligned_region, 1);
      ASSERT_EQ(page, small_page);

      const size_t num_pages = 5;
      const size_t aligned_region = large_page * num_pages;
      page = os::page_size_for_region_aligned(aligned_region, num_pages);
      ASSERT_EQ(page, large_page);
    }
  }
}

TEST_VM(os, page_size_for_region_unaligned) {
  if (UseLargePages) {
    // Given exact page size, should return that page size.
    for (size_t s = os::page_sizes().largest(); s != 0; s = os::page_sizes().next_smaller(s)) {
      size_t actual = os::page_size_for_region_unaligned(s, 1);
      ASSERT_EQ(s, actual);
    }

    // Given slightly larger size than a page size, return the page size.
    for (size_t s = os::page_sizes().largest(); s != 0; s = os::page_sizes().next_smaller(s)) {
      size_t actual = os::page_size_for_region_unaligned(s + 17, 1);
      ASSERT_EQ(s, actual);
    }

    // Given a slightly smaller size than a page size,
    // return the next smaller page size.
    for (size_t s = os::page_sizes().largest(); s != 0; s = os::page_sizes().next_smaller(s)) {
      const size_t expected = os::page_sizes().next_smaller(s);
      if (expected != 0) {
        size_t actual = os::page_size_for_region_unaligned(s - 17, 1);
        ASSERT_EQ(actual, expected);
      }
    }

    // Return small page size for values less than a small page.
    size_t small_page = os::page_sizes().smallest();
    size_t actual = os::page_size_for_region_unaligned(small_page - 17, 1);
    ASSERT_EQ(small_page, actual);
  }
}

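// Statistical sanity check for os::next_random(): scaled values must stay within [0, 1],
// the sequence for seed 1 must end on a known value, and the mean and raw second moment
// must be close to those of a uniform distribution (~0.5 and ~1/3).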
TEST(os, test_random) {
  const double m = 2147483647;
  double mean = 0.0, variance = 0.0, t;
  const int reps = 10000;
  unsigned int seed = 1;

  // tty->print_cr("seed %u for %d repeats...", seed, reps);
  int num;
  for (int k = 0; k < reps; k++) {
    // Use next_random so the calculation is stateless.
    num = seed = os::next_random(seed);
    double u = (double)num / m;
    ASSERT_TRUE(u >= 0.0 && u <= 1.0) << "bad random number!";

    // Accumulate the mean and the raw (uncentered) second moment of the sequence;
    // the expected values below are tuned to these definitions.
    mean += u;
    variance += (u*u);
  }
  mean /= reps;
  variance /= (reps - 1);

  ASSERT_EQ(num, 1043618065) << "bad seed";
  // tty->print_cr("mean of the 1st 10000 numbers: %f", mean);
  int intmean = (int)(mean*100);
  ASSERT_EQ(intmean, 50);
  // tty->print_cr("variance of the 1st 10000 numbers: %f", variance);
  int intvariance = (int)(variance*100);
  ASSERT_EQ(intvariance, 33);
  const double eps = 0.0001;
  t = fabsd(mean - 0.5018);
  ASSERT_LT(t, eps) << "bad mean";
  t = fabsd(variance - 0.3355);
  ASSERT_LT(t, eps) << "bad variance";
}

#ifdef ASSERT
TEST_VM_ASSERT_MSG(os, page_size_for_region_with_zero_min_pages,
                   "assert.min_pages > 0. failed: sanity") {
  size_t region_size = 16 * os::vm_page_size();
  os::page_size_for_region_aligned(region_size, 0); // should assert
}
#endif

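// Helper for the hex dump test below: dump [addr, addr + len) with the given unit size
// into a small buffer and check that the expected pattern occurs in the output.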
static void do_test_print_hex_dump(address addr, size_t len, int unitsize, const char* expected) {
  char buf[256];
  buf[0] = '\0';
  stringStream ss(buf, sizeof(buf));
  os::print_hex_dump(&ss, addr, addr + len, unitsize);
  // tty->print_cr("expected: %s", expected);
  // tty->print_cr("result: %s", buf);
  EXPECT_THAT(buf, HasSubstr(expected));
}

TEST_VM(os, test_print_hex_dump) {
  const char* pattern [4] = {
#ifdef VM_LITTLE_ENDIAN
    "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f",
    "0100 0302 0504 0706 0908 0b0a 0d0c 0f0e",
    "03020100 07060504 0b0a0908 0f0e0d0c",
    "0706050403020100 0f0e0d0c0b0a0908"
#else
    "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f",
    "0001 0203 0405 0607 0809 0a0b 0c0d 0e0f",
    "00010203 04050607 08090a0b 0c0d0e0f",
    "0001020304050607 08090a0b0c0d0e0f"
#endif
  };

  const char* pattern_not_readable [4] = {
    "?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ??",
    "???? ???? ???? ???? ???? ???? ???? ????",
    "???????? ???????? ???????? ????????",
    "???????????????? ????????????????"
  };

  // On AIX, zero page is readable.
  address unreadable =
#ifdef AIX
    (address) 0xFFFFFFFFFFFF0000ULL;
#else
    (address) 0
#endif
    ;

  ResourceMark rm;
  char buf[64];
  stringStream ss(buf, sizeof(buf));
  outputStream* out = &ss;
//  outputStream* out = tty; // enable for printout

  // Test dumping unreadable memory
  // Exclude test for Windows for now, since it needs SEH handling to work which cannot be
  // guaranteed when we call directly into VM code. (see JDK-8220220)
#ifndef _WIN32
  do_test_print_hex_dump(unreadable, 100, 1, pattern_not_readable[0]);
  do_test_print_hex_dump(unreadable, 100, 2, pattern_not_readable[1]);
  do_test_print_hex_dump(unreadable, 100, 4, pattern_not_readable[2]);
  do_test_print_hex_dump(unreadable, 100, 8, pattern_not_readable[3]);
#endif

  // Test dumping readable memory
  address arr = (address)os::malloc(100, mtInternal);
  for (u1 c = 0; c < 100; c++) {
    arr[c] = c;
  }

  // properly aligned
  do_test_print_hex_dump(arr, 100, 1, pattern[0]);
  do_test_print_hex_dump(arr, 100, 2, pattern[1]);
  do_test_print_hex_dump(arr, 100, 4, pattern[2]);
  do_test_print_hex_dump(arr, 100, 8, pattern[3]);

  // Not properly aligned. Should automatically down-align by unitsize
  do_test_print_hex_dump(arr + 1, 100, 2, pattern[1]);
  do_test_print_hex_dump(arr + 1, 100, 4, pattern[2]);
  do_test_print_hex_dump(arr + 1, 100, 8, pattern[3]);

  os::free(arr);
}

//////////////////////////////////////////////////////////////////////////////
// Test os::vsnprintf and friends.

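// Result checking helper. os::snprintf/os::vsnprintf follow C99 and always return the
// untruncated length (expect_count == true); the jio_* variants instead return a
// negative value when the output was truncated (expect_count == false).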
static void check_snprintf_result(int expected, size_t limit, int actual, bool expect_count) {
  if (expect_count || ((size_t)expected < limit)) {
    ASSERT_EQ(expected, actual);
  } else {
    ASSERT_GT(0, actual);
  }
}

// PrintFn is expected to be int (*)(char*, size_t, const char*, ...).
// But jio_snprintf is a C-linkage function with that signature, which
// has a different type on some platforms (like Solaris).
template<typename PrintFn>
static void test_snprintf(PrintFn pf, bool expect_count) {
  const char expected[] = "abcdefghijklmnopqrstuvwxyz";
  const int expected_len = sizeof(expected) - 1;
  const size_t padding_size = 10;
  char buffer[2 * (sizeof(expected) + padding_size)];
  char check_buffer[sizeof(buffer)];
  const char check_char = '1';  // Something not in expected.
  memset(check_buffer, check_char, sizeof(check_buffer));
  const size_t sizes_to_test[] = {
    sizeof(buffer) - padding_size,       // Fits, with plenty of space to spare.
    sizeof(buffer)/2,                    // Fits, with space to spare.
    sizeof(buffer)/4,                    // Doesn't fit.
    sizeof(expected) + padding_size + 1, // Fits, with a little room to spare.
    sizeof(expected) + padding_size,     // Fits exactly.
    sizeof(expected) + padding_size - 1, // Doesn't quite fit.
    2,                                   // One char + terminating NUL.
    1,                                   // Only space for terminating NUL.
    0 };                                 // No space at all.
  for (unsigned i = 0; i < ARRAY_SIZE(sizes_to_test); ++i) {
    memset(buffer, check_char, sizeof(buffer)); // To catch stray writes.
    size_t test_size = sizes_to_test[i];
    ResourceMark rm;
    stringStream s;
    s.print("test_size: " SIZE_FORMAT, test_size);
    SCOPED_TRACE(s.as_string());
    size_t prefix_size = padding_size;
    guarantee(test_size <= (sizeof(buffer) - prefix_size), "invariant");
    size_t write_size = MIN2(sizeof(expected), test_size);
    size_t suffix_size = sizeof(buffer) - prefix_size - write_size;
    char* write_start = buffer + prefix_size;
    char* write_end = write_start + write_size;

    int result = pf(write_start, test_size, "%s", expected);

    check_snprintf_result(expected_len, test_size, result, expect_count);

    // Verify expected output.
    if (test_size > 0) {
      ASSERT_EQ(0, strncmp(write_start, expected, write_size - 1));
      // Verify terminating NUL of output.
      ASSERT_EQ('\0', write_start[write_size - 1]);
    } else {
      guarantee(test_size == 0, "invariant");
      guarantee(write_size == 0, "invariant");
      guarantee(prefix_size + suffix_size == sizeof(buffer), "invariant");
      guarantee(write_start == write_end, "invariant");
    }

    // Verify no scribbling on prefix or suffix.
    ASSERT_EQ(0, strncmp(buffer, check_buffer, prefix_size));
    ASSERT_EQ(0, strncmp(write_end, check_buffer, suffix_size));
  }

  // Special case of 0-length buffer with empty (except for terminator) output.
  check_snprintf_result(0, 0, pf(nullptr, 0, "%s", ""), expect_count);
  check_snprintf_result(0, 0, pf(nullptr, 0, ""), expect_count);
}

// This is probably equivalent to os::snprintf, but we're being
// explicit about what we're testing here.
static int vsnprintf_wrapper(char* buf, size_t len, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int result = os::vsnprintf(buf, len, fmt, args);
  va_end(args);
  return result;
}

TEST_VM(os, vsnprintf) {
  test_snprintf(vsnprintf_wrapper, true);
}

TEST_VM(os, snprintf) {
  test_snprintf(os::snprintf, true);
}

// These are declared in jvm.h; test here, with related functions.
extern "C" {
int jio_vsnprintf(char*, size_t, const char*, va_list);
int jio_snprintf(char*, size_t, const char*, ...);
}

// This is probably equivalent to jio_snprintf, but we're being
// explicit about what we're testing here.
static int jio_vsnprintf_wrapper(char* buf, size_t len, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int result = jio_vsnprintf(buf, len, fmt, args);
  va_end(args);
  return result;
}

TEST_VM(os, jio_vsnprintf) {
  test_snprintf(jio_vsnprintf_wrapper, false);
}

TEST_VM(os, jio_snprintf) {
  test_snprintf(jio_snprintf, false);
}

#ifdef __APPLE__
// Not all macOS versions can use os::reserve_memory (i.e. anon_mmap) API
// to reserve executable memory, so before attempting to use it,
// we need to verify that we can do so by asking for a tiny executable
// memory chunk.
static inline bool can_reserve_executable_memory(void) {
  bool executable = true;
  size_t len = 128;
  char* p = os::reserve_memory(len, executable);
  bool exec_supported = (p != nullptr);
  if (exec_supported) {
    os::release_memory(p, len);
  }
  return exec_supported;
}
#endif

// Test that os::release_memory() can deal with areas containing multiple mappings.
// To debug these tests, enable the verbose variant of PRINT_MAPPINGS below.
//#define PRINT_MAPPINGS(s) { tty->print_cr("%s", s); os::print_memory_mappings((char*)p, total_range_len, tty); tty->cr(); }
#ifndef PRINT_MAPPINGS
#define PRINT_MAPPINGS(s)
#endif

// Release a range allocated with reserve_multiple carefully, to not trip mapping
// asserts on Windows in os::release_memory()
static void carefully_release_multiple(address start, int num_stripes, size_t stripe_len) {
  for (int stripe = 0; stripe < num_stripes; stripe++) {
    address q = start + (stripe * stripe_len);
    EXPECT_TRUE(os::release_memory((char*)q, stripe_len));
  }
}

#ifndef _AIX // JDK-8257041
// Reserve an area consisting of multiple mappings
//  (from multiple calls to os::reserve_memory)
static address reserve_multiple(int num_stripes, size_t stripe_len) {
  assert(is_aligned(stripe_len, os::vm_allocation_granularity()), "Sanity");

#ifdef __APPLE__
  // Workaround: try reserving executable memory to figure out
  // if such operation is supported on this macOS version
  const bool exec_supported = can_reserve_executable_memory();
#endif

  address p = nullptr;
  for (int tries = 0; tries < 256 && p == nullptr; tries ++) {
    size_t total_range_len = num_stripes * stripe_len;
    // Reserve a large contiguous area to get the address space...
    p = (address)os::reserve_memory(total_range_len);
    EXPECT_NE(p, (address)nullptr);
    // .. release it...
    EXPECT_TRUE(os::release_memory((char*)p, total_range_len));
    // ... re-reserve in the same spot multiple areas...
    for (int stripe = 0; stripe < num_stripes; stripe++) {
      address q = p + (stripe * stripe_len);
      // Commit, alternatingly with or without exec permission,
      //  to prevent kernel from folding these mappings.
#ifdef __APPLE__
      const bool executable = exec_supported ? (stripe % 2 == 0) : false;
#else
      const bool executable = stripe % 2 == 0;
#endif
      q = (address)os::attempt_reserve_memory_at((char*)q, stripe_len, executable);
      if (q == nullptr) {
        // Someone grabbed that area concurrently. Cleanup, then retry.
        tty->print_cr("reserve_multiple: retry (%d)...", stripe);
        carefully_release_multiple(p, stripe, stripe_len);
        p = nullptr;
      } else {
        EXPECT_TRUE(os::commit_memory((char*)q, stripe_len, executable));
      }
    }
  }
  return p;
}
#endif // !AIX

// Reserve an area with a single call to os::reserve_memory,
//  with multiple committed and uncommitted regions
static address reserve_one_commit_multiple(int num_stripes, size_t stripe_len) {
  assert(is_aligned(stripe_len, os::vm_allocation_granularity()), "Sanity");
  size_t total_range_len = num_stripes * stripe_len;
  address p = (address)os::reserve_memory(total_range_len);
  EXPECT_NE(p, (address)nullptr);
  for (int stripe = 0; stripe < num_stripes; stripe++) {
    address q = p + (stripe * stripe_len);
    if (stripe % 2 == 0) {
      EXPECT_TRUE(os::commit_memory((char*)q, stripe_len, false));
    }
  }
  return p;
}

#ifdef _WIN32
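// RAII helper which temporarily flips UseNUMAInterleaving; on Windows this controls
// whether os::release_memory() accepts a range spanning multiple separate mappings.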
struct NUMASwitcher {
  const bool _b;
  NUMASwitcher(bool v): _b(UseNUMAInterleaving) { UseNUMAInterleaving = v; }
  ~NUMASwitcher() { UseNUMAInterleaving = _b; }
};
#endif

#ifndef _AIX // JDK-8257041
TEST_VM(os, release_multi_mappings) {

  // With NMT enabled, this will trigger JDK-8263464. For now disable the test if NMT=on.
  if (MemTracker::tracking_level() > NMT_off) {
    return;
  }

  // Test that we can release an area created with multiple reservation calls
  // What we do:
  // A) we reserve 6 small segments (stripes) adjacent to each other. We commit
  //    them with alternating permissions to prevent the kernel from folding them into
  //    a single segment.
  //    -stripe-stripe-stripe-stripe-stripe-stripe-
  // B) we release the middle four stripes with a single os::release_memory call. This
  //    tests that os::release_memory indeed works across multiple segments created with
  //    multiple os::reserve calls.
  //    -stripe-___________________________-stripe-
  // C) Into the now vacated address range between the first and the last stripe, we
  //    re-reserve a new memory range. We expect this to work as a proof that the address
  //    range was really released by the single release call (B).
  //
  // Note that this is inherently racy. Between (B) and (C), some other thread may have
  //  reserved something into the hole in the meantime. Therefore we keep that range small and
  //  entrenched between the first and last stripe, which reduces the chance of some concurrent
  //  thread grabbing that memory.

  const size_t stripe_len = os::vm_allocation_granularity();
  const int num_stripes = 6;
  const size_t total_range_len = stripe_len * num_stripes;

  // reserve address space...
  address p = reserve_multiple(num_stripes, stripe_len);
  ASSERT_NE(p, (address)nullptr);
  PRINT_MAPPINGS("A");

  // .. release the middle stripes...
  address p_middle_stripes = p + stripe_len;
  const size_t middle_stripe_len = (num_stripes - 2) * stripe_len;
  {
    // On Windows, temporarily switch on UseNUMAInterleaving to allow release_memory to release
    //  multiple mappings in one go (otherwise we assert, which we test too, see death test below).
    WINDOWS_ONLY(NUMASwitcher b(true);)
    ASSERT_TRUE(os::release_memory((char*)p_middle_stripes, middle_stripe_len));
  }
  PRINT_MAPPINGS("B");

  // ...re-reserve the middle stripes. This should work unless release silently failed.
  address p2 = (address)os::attempt_reserve_memory_at((char*)p_middle_stripes, middle_stripe_len);

  ASSERT_EQ(p2, p_middle_stripes);

  PRINT_MAPPINGS("C");

  // Clean up. Release all mappings.
  {
    WINDOWS_ONLY(NUMASwitcher b(true);) // allow release_memory to release multiple regions
    ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
  }
}
#endif // !AIX

#ifdef _WIN32
// On Windows, test that we recognize bad ranges.
//  On debug this would assert. Test that too.
//  On other platforms, we are unable to recognize bad ranges.
#ifdef ASSERT
TEST_VM_ASSERT_MSG(os, release_bad_ranges, ".*bad release") {
#else
TEST_VM(os, release_bad_ranges) {
#endif
  char* p = os::reserve_memory(4 * M);
  ASSERT_NE(p, (char*)nullptr);
  // Release part of range
  ASSERT_FALSE(os::release_memory(p, M));
  // Release part of range
  ASSERT_FALSE(os::release_memory(p + M, M));
  // Release more than the range (explicitly switch off NUMA here
  //  to make os::release_memory() test more strictly and to not
  //  accidentally release neighbors)
  {
    NUMASwitcher b(false);
    ASSERT_FALSE(os::release_memory(p, M * 5));
    ASSERT_FALSE(os::release_memory(p - M, M * 5));
    ASSERT_FALSE(os::release_memory(p - M, M * 6));
  }

  ASSERT_TRUE(os::release_memory(p, 4 * M)); // Release for real
  ASSERT_FALSE(os::release_memory(p, 4 * M)); // Again, should fail
}
#endif // _WIN32

TEST_VM(os, release_one_mapping_multi_commits) {
  // Test that we can release an area consisting of interleaved
  //  committed and uncommitted regions:
  const size_t stripe_len = os::vm_allocation_granularity();
  const int num_stripes = 6;
  const size_t total_range_len = stripe_len * num_stripes;

  // reserve address space...
  address p = reserve_one_commit_multiple(num_stripes, stripe_len);
  PRINT_MAPPINGS("A");
  ASSERT_NE(p, (address)nullptr);

  // Make things even more difficult by trying to reserve at the border of the region
  address border = p + num_stripes * stripe_len;
  address p2 = (address)os::attempt_reserve_memory_at((char*)border, stripe_len);
  PRINT_MAPPINGS("B");

  ASSERT_TRUE(p2 == nullptr || p2 == border);

  ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
  PRINT_MAPPINGS("C");

  if (p2 != nullptr) {
    ASSERT_TRUE(os::release_memory((char*)p2, stripe_len));
    PRINT_MAPPINGS("D");
  }
}

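// Print either a specific range or the whole address space into a temporary buffer;
// on platforms with a real implementation, expect some output to have been produced.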
static void test_show_mappings(address start, size_t size) {
  // Note: should this overflow, that's okay. The stream will silently truncate. Does not matter for the test.
  const size_t buflen = 4 * M;
  char* buf = NEW_C_HEAP_ARRAY(char, buflen, mtInternal);
  buf[0] = '\0';
  stringStream ss(buf, buflen);
  if (start != nullptr) {
    os::print_memory_mappings((char*)start, size, &ss);
  } else {
    os::print_memory_mappings(&ss); // prints full address space
  }
  // Still an empty implementation on macOS and AIX
#if defined(LINUX) || defined(_WIN32)
  EXPECT_NE(buf[0], '\0');
#endif
  // buf[buflen - 1] = '\0';
  // tty->print_raw(buf);
  FREE_C_HEAP_ARRAY(char, buf);
}

TEST_VM(os, show_mappings_small_range) {
  test_show_mappings((address)0x100000, 2 * G);
}

TEST_VM(os, show_mappings_full_range) {
  // Reserve a small range and fill it with a marker string, should show up
  // on implementations displaying range snippets
  char* p = os::reserve_memory(1 * M, false, mtInternal);
  if (p != nullptr) {
    if (os::commit_memory(p, 1 * M, false)) {
      strcpy(p, "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
    }
  }
  test_show_mappings(nullptr, 0);
  if (p != nullptr) {
    os::release_memory(p, 1 * M);
  }
}

#ifdef _WIN32
// Test os::win32::find_mapping
TEST_VM(os, find_mapping_simple) {
  const size_t total_range_len = 4 * M;
  os::win32::mapping_info_t mapping_info;

  // Some obvious negatives
  ASSERT_FALSE(os::win32::find_mapping((address)nullptr, &mapping_info));
  ASSERT_FALSE(os::win32::find_mapping((address)4711, &mapping_info));

  // A simple allocation
  {
    address p = (address)os::reserve_memory(total_range_len);
    ASSERT_NE(p, (address)nullptr);
    PRINT_MAPPINGS("A");
    for (size_t offset = 0; offset < total_range_len; offset += 4711) {
      ASSERT_TRUE(os::win32::find_mapping(p + offset, &mapping_info));
      ASSERT_EQ(mapping_info.base, p);
      ASSERT_EQ(mapping_info.regions, 1);
      ASSERT_EQ(mapping_info.size, total_range_len);
      ASSERT_EQ(mapping_info.committed_size, 0);
    }
    // Test just outside the allocation
    if (os::win32::find_mapping(p - 1, &mapping_info)) {
      ASSERT_NE(mapping_info.base, p);
    }
    if (os::win32::find_mapping(p + total_range_len, &mapping_info)) {
      ASSERT_NE(mapping_info.base, p);
    }
    ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
    PRINT_MAPPINGS("B");
    ASSERT_FALSE(os::win32::find_mapping(p, &mapping_info));
  }
}

TEST_VM(os, find_mapping_2) {
  // A more complex allocation, consisting of multiple regions.
  const size_t total_range_len = 4 * M;
  os::win32::mapping_info_t mapping_info;

  const size_t stripe_len = total_range_len / 4;
  address p = reserve_one_commit_multiple(4, stripe_len);
  ASSERT_NE(p, (address)nullptr);
  PRINT_MAPPINGS("A");
  for (size_t offset = 0; offset < total_range_len; offset += 4711) {
    ASSERT_TRUE(os::win32::find_mapping(p + offset, &mapping_info));
    ASSERT_EQ(mapping_info.base, p);
    ASSERT_EQ(mapping_info.regions, 4);
    ASSERT_EQ(mapping_info.size, total_range_len);
    ASSERT_EQ(mapping_info.committed_size, total_range_len / 2);
  }
  // Test just outside the allocation
  if (os::win32::find_mapping(p - 1, &mapping_info)) {
    ASSERT_NE(mapping_info.base, p);
  }
  if (os::win32::find_mapping(p + total_range_len, &mapping_info)) {
    ASSERT_NE(mapping_info.base, p);
  }
  ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
  PRINT_MAPPINGS("B");
  ASSERT_FALSE(os::win32::find_mapping(p, &mapping_info));
}

TEST_VM(os, find_mapping_3) {
  const size_t total_range_len = 4 * M;
  os::win32::mapping_info_t mapping_info;

  // A more complex case, consisting of multiple allocations.
  {
    const size_t stripe_len = total_range_len / 4;
    address p = reserve_multiple(4, stripe_len);
    ASSERT_NE(p, (address)nullptr);
    PRINT_MAPPINGS("E");
    for (int stripe = 0; stripe < 4; stripe++) {
      ASSERT_TRUE(os::win32::find_mapping(p + (stripe * stripe_len), &mapping_info));
      ASSERT_EQ(mapping_info.base, p + (stripe * stripe_len));
      ASSERT_EQ(mapping_info.regions, 1);
      ASSERT_EQ(mapping_info.size, stripe_len);
      ASSERT_EQ(mapping_info.committed_size, stripe_len);
    }
    carefully_release_multiple(p, 4, stripe_len);
    PRINT_MAPPINGS("F");
    ASSERT_FALSE(os::win32::find_mapping(p, &mapping_info));
  }
}
#endif // _WIN32

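// Basic consistency checks for the page size bookkeeping: os::min_page_size(),
// os::vm_page_size(), os::large_page_size() and the os::page_sizes() set.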
TEST_VM(os, os_pagesizes) {
  ASSERT_EQ(os::min_page_size(), 4 * K);
  ASSERT_LE(os::min_page_size(), os::vm_page_size());
  // The vm_page_size should be the smallest in the set of allowed page sizes
  // (the contract says "default" page size, but a lot of code actually assumes
  //  it to be the smallest page size; a notable, deliberate exception is
  //  AIX, which can have smaller page sizes, but those are not part of the
  //  page_sizes() set).
  ASSERT_EQ(os::page_sizes().smallest(), os::vm_page_size());
  // The large page size, if it exists, shall be part of the set
  if (UseLargePages) {
    ASSERT_GT(os::large_page_size(), os::vm_page_size());
    ASSERT_TRUE(os::page_sizes().contains(os::large_page_size()));
  }
  os::page_sizes().print_on(tty);
  tty->cr();
}

static const int min_page_size_log2 = exact_log2(os::min_page_size());
static const int max_page_size_log2 = (int)BitsPerWord;

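// Exercise os::PageSizes with every pair of power-of-two sizes in the supported range,
// checking smallest(), largest(), contains(), next_larger() and next_smaller().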
TEST_VM(os, pagesizes_test_range) {
  for (int bit = min_page_size_log2; bit < max_page_size_log2; bit++) {
    for (int bit2 = min_page_size_log2; bit2 < max_page_size_log2; bit2++) {
      const size_t s = (size_t)1 << bit;
      const size_t s2 = (size_t)1 << bit2;
      os::PageSizes pss;
      ASSERT_EQ((size_t)0, pss.smallest());
      ASSERT_EQ((size_t)0, pss.largest());
      // one size set
      pss.add(s);
      ASSERT_TRUE(pss.contains(s));
      ASSERT_EQ(s, pss.smallest());
      ASSERT_EQ(s, pss.largest());
      ASSERT_EQ(pss.next_larger(s), (size_t)0);
      ASSERT_EQ(pss.next_smaller(s), (size_t)0);
      // two set
      pss.add(s2);
      ASSERT_TRUE(pss.contains(s2));
      if (s2 < s) {
        ASSERT_EQ(s2, pss.smallest());
        ASSERT_EQ(s, pss.largest());
        ASSERT_EQ(pss.next_larger(s2), (size_t)s);
        ASSERT_EQ(pss.next_smaller(s2), (size_t)0);
        ASSERT_EQ(pss.next_larger(s), (size_t)0);
        ASSERT_EQ(pss.next_smaller(s), (size_t)s2);
      } else if (s2 > s) {
        ASSERT_EQ(s, pss.smallest());
        ASSERT_EQ(s2, pss.largest());
        ASSERT_EQ(pss.next_larger(s), (size_t)s2);
        ASSERT_EQ(pss.next_smaller(s), (size_t)0);
        ASSERT_EQ(pss.next_larger(s2), (size_t)0);
        ASSERT_EQ(pss.next_smaller(s2), (size_t)s);
      }
      for (int bit3 = min_page_size_log2; bit3 < max_page_size_log2; bit3++) {
        const size_t s3 = (size_t)1 << bit3;
        ASSERT_EQ(s3 == s || s3 == s2, pss.contains(s3));
      }
    }
  }
}

TEST_VM(os, pagesizes_test_print) {
  os::PageSizes pss;
  const size_t sizes[] = { 16 * K, 64 * K, 128 * K, 1 * M, 4 * M, 1 * G, 2 * G, 0 };
  static const char* const expected = "16k, 64k, 128k, 1M, 4M, 1G, 2G";
  for (int i = 0; sizes[i] != 0; i++) {
    pss.add(sizes[i]);
  }
  char buffer[256];
  stringStream ss(buffer, sizeof(buffer));
  pss.print_on(&ss);
  EXPECT_STREQ(expected, buffer);
}

TEST_VM(os, dll_address_to_function_and_library_name) {
  char tmp[1024];
  char output[1024];
  stringStream st(output, sizeof(output));

#define EXPECT_CONTAINS(haystack, needle) \
  EXPECT_THAT(haystack, HasSubstr(needle));
#define EXPECT_DOES_NOT_CONTAIN(haystack, needle) \
  EXPECT_THAT(haystack, Not(HasSubstr(needle)));
// #define LOG(...) tty->print_cr(__VA_ARGS__); // enable if needed
#define LOG(...)

  // Invalid addresses
  LOG("os::print_function_and_library_name(st, -1) expects FALSE.");
  address addr = (address)(intptr_t)-1;
  EXPECT_FALSE(os::print_function_and_library_name(&st, addr));
  LOG("os::print_function_and_library_name(st, nullptr) expects FALSE.");
  addr = nullptr;
  EXPECT_FALSE(os::print_function_and_library_name(&st, addr));

  // Valid addresses
  // Test all combinations of shorten-paths, demangle, strip-arguments and scratch buffer
  for (int i = 0; i < 16; i++) {
    const bool shorten_paths = (i & 1) != 0;
    const bool demangle = (i & 2) != 0;
    const bool strip_arguments = (i & 4) != 0;
    const bool provide_scratch_buffer = (i & 8) != 0;
    LOG("shorten_paths=%d, demangle=%d, strip_arguments=%d, provide_scratch_buffer=%d",
        shorten_paths, demangle, strip_arguments, provide_scratch_buffer);

    // Should show Threads::create_vm in libjvm
    addr = CAST_FROM_FN_PTR(address, Threads::create_vm);
    st.reset();
    EXPECT_TRUE(os::print_function_and_library_name(&st, addr,
                                                    provide_scratch_buffer ? tmp : nullptr,
                                                    sizeof(tmp),
                                                    shorten_paths, demangle,
                                                    strip_arguments));
    EXPECT_CONTAINS(output, "Threads");
    EXPECT_CONTAINS(output, "create_vm");
    EXPECT_CONTAINS(output, "jvm"); // "jvm.dll" or "libjvm.so" or similar
    LOG("%s", output);

    // Test truncation on scratch buffer
    if (provide_scratch_buffer) {
      st.reset();
      tmp[10] = 'X';
      EXPECT_TRUE(os::print_function_and_library_name(&st, addr, tmp, 10,
                                                      shorten_paths, demangle));
      EXPECT_EQ(tmp[10], 'X');
      LOG("%s", output);
    }
  }
}

// Not a regex! Very primitive, just match:
// "d" - digit
// "a" - ascii
// "." - any character
// everything else must match literally
static bool very_simple_string_matcher(const char* pattern, const char* s) {
  const size_t lp = strlen(pattern);
  const size_t ls = strlen(s);
  if (ls < lp) {
    return false;
  }
  for (size_t i = 0; i < lp; i ++) {
    switch (pattern[i]) {
      case '.': continue;
      case 'd': if (!isdigit(s[i])) return false; break;
      case 'a': if (!isascii(s[i])) return false; break;
      default: if (s[i] != pattern[i]) return false; break;
    }
  }
  return true;
}

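// Check the general shape of os::iso8601_time() output (UTC, local time, and explicit
// millisecond timestamps) and make sure it does not write beyond the buffer.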
TEST_VM(os, iso8601_time) {
  char buffer[os::iso8601_timestamp_size + 1]; // + space for canary
  buffer[os::iso8601_timestamp_size] = 'X'; // canary
  const char* result = nullptr;
  // YYYY-MM-DDThh:mm:ss.mmm+zzzz
  const char* const pattern_utc = "dddd-dd-dd.dd:dd:dd.ddd.0000";
  const char* const pattern_local = "dddd-dd-dd.dd:dd:dd.ddd.dddd";

  result = os::iso8601_time(buffer, sizeof(buffer), true);
  tty->print_cr("%s", result);
  EXPECT_EQ(result, buffer);
  EXPECT_TRUE(very_simple_string_matcher(pattern_utc, result));

  result = os::iso8601_time(buffer, sizeof(buffer), false);
  tty->print_cr("%s", result);
  EXPECT_EQ(result, buffer);
  EXPECT_TRUE(very_simple_string_matcher(pattern_local, result));

  // Test with explicit timestamps
  result = os::iso8601_time(0, buffer, sizeof(buffer), true);
  tty->print_cr("%s", result);
  EXPECT_EQ(result, buffer);
  EXPECT_TRUE(very_simple_string_matcher("1970-01-01.00:00:00.000+0000", result));

  result = os::iso8601_time(17, buffer, sizeof(buffer), true);
  tty->print_cr("%s", result);
  EXPECT_EQ(result, buffer);
  EXPECT_TRUE(very_simple_string_matcher("1970-01-01.00:00:00.017+0000", result));

  // Canary should still be intact
  EXPECT_EQ(buffer[os::iso8601_timestamp_size], 'X');
}

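// An all-zero frame should be recognized as the first C frame, while the current frame
// must have a sender. Not testable on Windows, Zero, or in ARM thumb mode.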
TEST_VM(os, is_first_C_frame) {
#if !defined(_WIN32) && !defined(ZERO) && !defined(__thumb__)
  frame invalid_frame;
  EXPECT_TRUE(os::is_first_C_frame(&invalid_frame)); // the frame has zeroes for all values

  frame cur_frame = os::current_frame(); // this frame has to have a sender
  EXPECT_FALSE(os::is_first_C_frame(&cur_frame));
#endif // !_WIN32 && !ZERO && !__thumb__
}

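// os::trim_native_heap() is only supported with glibc (it is based on malloc_trim());
// elsewhere it must report that trimming is unavailable.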
#ifdef __GLIBC__
TEST_VM(os, trim_native_heap) {
  EXPECT_TRUE(os::can_trim_native_heap());
  os::size_change_t sc;
  sc.before = sc.after = (size_t)-1;
  EXPECT_TRUE(os::trim_native_heap(&sc));
  tty->print_cr(SIZE_FORMAT "->" SIZE_FORMAT, sc.before, sc.after);
  // Regardless of whether we freed memory, both before and after
  // should be somewhat believable numbers (RSS).
  const size_t min = 5 * M;
  const size_t max = LP64_ONLY(20 * G) NOT_LP64(3 * G);
  ASSERT_LE(min, sc.before);
  ASSERT_GT(max, sc.before);
  ASSERT_LE(min, sc.after);
  ASSERT_GT(max, sc.after);
  // Should also work
  EXPECT_TRUE(os::trim_native_heap());
}
#else
TEST_VM(os, trim_native_heap) {
  EXPECT_FALSE(os::can_trim_native_heap());
}
#endif // __GLIBC__

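// os::open() is expected to set the close-on-exec flag; verify via fcntl() that
// FD_CLOEXEC is set on the returned descriptor (POSIX only).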
TEST_VM(os, open_O_CLOEXEC) {
#if !defined(_WIN32)
  int fd = os::open("test_file.txt", O_RDWR | O_CREAT | O_TRUNC, 0666); // open will use O_CLOEXEC
  EXPECT_TRUE(fd > 0);
  int flags = ::fcntl(fd, F_GETFD);
  EXPECT_TRUE((flags & FD_CLOEXEC) != 0); // if O_CLOEXEC worked, then FD_CLOEXEC should be ON
  ::close(fd);
#endif
}

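// Reserving at a wish address must never replace an existing mapping; the attempt has
// to fail instead. Tested for small pages and, where large pages are "special", for
// os::reserve_memory_special() as well.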
TEST_VM(os, reserve_at_wish_address_shall_not_replace_mappings_smallpages) {
  char* p1 = os::reserve_memory(M, false, mtTest);
  ASSERT_NE(p1, nullptr);
  char* p2 = os::attempt_reserve_memory_at(p1, M);
  ASSERT_EQ(p2, nullptr); // should have failed
  os::release_memory(p1, M);
}

TEST_VM(os, reserve_at_wish_address_shall_not_replace_mappings_largepages) {
  if (UseLargePages && !os::can_commit_large_page_memory()) { // aka special
    const size_t lpsz = os::large_page_size();
    char* p1 = os::reserve_memory_aligned(lpsz, lpsz, false);
    ASSERT_NE(p1, nullptr);
    char* p2 = os::reserve_memory_special(lpsz, lpsz, lpsz, p1, false);
    ASSERT_EQ(p2, nullptr); // should have failed
    os::release_memory(p1, lpsz);
  } else {
    tty->print_cr("Skipped.");
  }
}

#ifdef AIX
// On AIX, we should fail attach attempts not aligned to segment boundaries (256M)
TEST_VM(os, aix_reserve_at_non_shmlba_aligned_address) {
  if (Use64KPages) {
    char* p = os::attempt_reserve_memory_at((char*)0x1f00000, M);
    ASSERT_EQ(p, nullptr); // should have failed
    p = os::attempt_reserve_memory_at((char*)((64 * G) + M), M);
    ASSERT_EQ(p, nullptr); // should have failed
  }
}
#endif // AIX

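// os::vm_min_address() should return a plausible lower bound for mappable addresses:
// at least 1M, and not absurdly large.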
TEST_VM(os, vm_min_address) {
  size_t s = os::vm_min_address();
  ASSERT_GE(s, M);
  // Test the upper limit. On Linux it is adjustable, so we just test for absurd values
  // to prevent errors with high vm.mmap_min_addr settings.
#if defined(_LP64)
  ASSERT_LE(s, NOT_LINUX(G * 4) LINUX_ONLY(G * 1024));
#endif
}
978