1 /*
2 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP
26 #define SHARE_OOPS_ACCESSBACKEND_HPP
27
28 #include "cppstdlib/type_traits.hpp"
29 #include "gc/shared/barrierSetConfig.hpp"
30 #include "memory/allocation.hpp"
31 #include "metaprogramming/enableIf.hpp"
32 #include "oops/accessDecorators.hpp"
33 #include "oops/inlineKlass.hpp"
34 #include "oops/oopsHierarchy.hpp"
35 #include "runtime/globals.hpp"
36 #include "utilities/debug.hpp"
37 #include "utilities/globalDefinitions.hpp"
38
// Result from oop_arraycopy
enum class OopCopyResult {
  ok,                      // oop array copy successful
  failed_check_class_cast, // oop array copy failed subtype check (ARRAYCOPY_CHECKCAST)
  failed_check_null        // oop array copy failed null check (ARRAYCOPY_NOTNULL)
};
45
// This metafunction returns either oop or narrowOop depending on whether
// an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  // Compression applies only when the access converts between heap and
  // external representations (INTERNAL_CONVERT_COMPRESSED_OOP) and the
  // runtime actually uses compressed oops (INTERNAL_RT_USE_COMPRESSED_OOPS).
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
54
// This meta-function returns either oop or narrowOop depending on whether
// a back-end needs to consider compressed oops types or not.
// Unlike HeapOopType, this only consults the runtime compressed-oops
// decision, not whether the particular access performs a conversion.
template <DecoratorSet decorators>
struct ValueOopType: AllStatic {
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  using type = std::conditional_t<needs_oop_compress, narrowOop, oop>;
};
62
namespace AccessInternal {
  // The kinds of access barriers that can be runtime-dispatched.
  // The *_AT variants access a field given an (oop base, ptrdiff_t offset)
  // pair; the others take a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_VALUE_COPY,
    BARRIER_VALUE_STORE_NULL,
  };

  // True iff a value of type T must be converted between the external oop
  // representation and the internal narrowOop heap representation: the value
  // is an oop, the heap representation is compressed, and T itself is oop.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public std::integral_constant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    std::is_same<typename HeapOopType<decorators>::type, narrowOop>::value &&
    std::is_same<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    using type = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                    typename HeapOopType<decorators>::type,
                                    T>;
  };

  // Returns the address of the (possibly compressed) oop field at byte_offset
  // within base, typed according to the decorators.
  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // Function pointer signatures for each barrier type. These are the types of
  // the dynamically resolved accessor functions patched in by RuntimeDispatch.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
    typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
    typedef T (*atomic_xchg_func_t)(void* addr, T new_value);

    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                              size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef void (*value_copy_func_t)(const ValuePayload& src, const ValuePayload& dst);
    typedef void (*value_store_null_func_t)(const ValuePayload& dst);
  };

  // Specialization for untyped (void element) accesses; only arraycopy
  // supports an unknown element type.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef OopCopyResult (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                              arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                              size_t length);
  };

  // Maps (decorators, T, barrier) to the matching function pointer type;
  // specializations are generated by the macro below.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_STORE_NULL, value_store_null_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Resolves the accessor for an access that may need GC barriers
  // (defined out-of-line; see the corresponding .inline.hpp/.cpp).
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  // Resolves the accessor for an oop access, taking compressed oops into account.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently access is included.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);

  // Raw (barrier-less) helpers for value-object payload copy / null store.
  void value_copy_internal(void* src, void* dst, size_t length);
  void value_store_null(void* dst, size_t length);
}
176
// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed so that fewer
// RawAccessBarrier instantiations are generated.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;
181
// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw"
// strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  // (i.e. compress the oop to narrowOop; defined out-of-line).
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // Pass-through overload: no compressed-oop conversion required.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  // Converts an external value to its in-heap (possibly compressed) form.
  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  // (inverse of encode: decompress narrowOop to oop; defined out-of-line).
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  // Pass-through overload: no compressed-oop conversion required.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  // Converts an in-heap (possibly compressed) value back to its external form.
  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // Memory-access primitives, selected by the MO_* memory-ordering decorator.
  // The MO_UNORDERED variants are plain accesses defined inline; the stronger
  // orderings are defined out-of-line (they go through OrderAccess/Atomic).
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  // Atomic operations support only the orderings declared below.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(void* addr, T new_value);

public:
  // Primitive accessors: dispatch to the *_internal overload selected by the
  // memory-ordering decorator.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
    return atomic_cmpxchg_internal<decorators>(addr, compare_value, new_value);
  }

  template <typename T>
  static inline T atomic_xchg(void* addr, T new_value) {
    return atomic_xchg_internal<decorators>(addr, new_value);
  }

  template <typename T>
  static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop accessors: like the primitive accessors above, but encode/decode the
  // value between its external (oop) and in-heap (possibly narrowOop) forms.
  // All are defined out-of-line.
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);

  template <typename T>
  static T oop_atomic_xchg(void* addr, T new_value);
  template <typename T>
  static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);

  // *_at convenience accessors: compute the field address and forward to the
  // raw-address accessor.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
    return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
  }

  template <typename T>
  static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
    return atomic_xchg(field_addr(base, offset), new_value);
  }

  template <typename T>
  static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  // Object clone and value-object payload operations (defined out-of-line).
  static void clone(oop src, oop dst, size_t size);
  static void value_copy(const ValuePayload& src, const ValuePayload& dst);
  static void value_store_null(const ValuePayload& dst);
};
367
namespace AccessInternal {
  // Debug-only sanity check that the current thread may perform heap accesses;
  // the assert_access_thread_state() macro expands to nothing in product builds.
  DEBUG_ONLY(void check_access_thread_state());
#define assert_access_thread_state() DEBUG_ONLY(check_access_thread_state())
}
372
373 // Below is the implementation of the first 4 steps of the template pipeline:
374 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
375 // and sets default decorators to sensible values.
376 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
377 // multiple types. The P type of the address and T type of the value must
378 // match.
379 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
380 // avoided, and in that case avoids it (calling raw accesses or
381 // primitive accesses in a build that does not require primitive GC barriers)
382 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
383 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers
384 // to the access.
385
386 namespace AccessInternal {
  // Canonicalizes any oop-like type to oop; the specialization below keeps
  // narrowOop as-is.
  template <typename T>
  struct OopOrNarrowOopInternal: AllStatic {
    typedef oop type;
  };

  template <>
  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
    typedef narrowOop type;
  };
396
  // This metafunction returns a canonicalized oop/narrowOop type for the
  // oop-like types passed in from oop_* overloads where the user has sworn
  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOop, instanceOopDesc*, and random other things).
  // In the oop_* overloads, it must hold that if the passed in type T is not
  // narrowOop, then it by contract has to be one of many oop-like types implicitly
  // convertible to oop, and hence returns oop as the canonical oop type.
  // If it turns out it was not, then the implicit conversion to oop will fail
  // to compile, as desired.
  template <typename T>
  struct OopOrNarrowOop: AllStatic {
    typedef typename OopOrNarrowOopInternal<std::decay_t<T>>::type type;
  };
410
411 inline void* field_addr(oop base, ptrdiff_t byte_offset) {
412 return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
413 }
414 // Step 4: Runtime dispatch
415 // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
416 // accessor. This is required when the access either depends on whether compressed oops
417 // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
418 // barriers). The way it works is that a function pointer initially pointing to an
419 // accessor resolution function gets called for each access. Upon first invocation,
420 // it resolves which accessor to be used in future invocations and patches the
421 // function pointer to this new accessor.
422
  // Primary template; only the per-barrier specializations below are used.
  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};
425
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    // Initially points to store_init; patched to the resolved accessor on
    // first use (see the definitions following these specializations).
    static func_t _store_func;

    // Resolves the accessor, patches _store_func, and performs the store.
    static void store_init(void* addr, T value);

    static inline void store(void* addr, T value) {
      assert_access_thread_state();
      _store_func(addr, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    // Initially points to store_at_init; patched on first use.
    static func_t _store_at_func;

    // Resolves the accessor, patches _store_at_func, and performs the store.
    static void store_at_init(oop base, ptrdiff_t offset, T value);

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      assert_access_thread_state();
      _store_at_func(base, offset, value);
    }
  };
451
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    // Initially points to load_init; patched to the resolved accessor on first use.
    static func_t _load_func;

    // Resolves the accessor, patches _load_func, and performs the load.
    static T load_init(void* addr);

    static inline T load(void* addr) {
      assert_access_thread_state();
      return _load_func(addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    // Initially points to load_at_init; patched on first use.
    static func_t _load_at_func;

    // Resolves the accessor, patches _load_at_func, and performs the load.
    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      assert_access_thread_state();
      return _load_at_func(base, offset);
    }
  };
477
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    // Initially points to atomic_cmpxchg_init; patched on first use.
    static func_t _atomic_cmpxchg_func;

    // Resolves the accessor, patches the pointer, and performs the cmpxchg.
    static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value);

    // Returns the value observed at addr before the (attempted) exchange.
    static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_func(addr, compare_value, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    // Initially points to atomic_cmpxchg_at_init; patched on first use.
    static func_t _atomic_cmpxchg_at_func;

    // Resolves the accessor, patches the pointer, and performs the cmpxchg.
    static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value);

    static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
      assert_access_thread_state();
      return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value);
    }
  };
503
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    // Initially points to atomic_xchg_init; patched on first use.
    static func_t _atomic_xchg_func;

    // Resolves the accessor, patches the pointer, and performs the exchange.
    static T atomic_xchg_init(void* addr, T new_value);

    // Returns the previous value stored at addr.
    static inline T atomic_xchg(void* addr, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_func(addr, new_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    // Initially points to atomic_xchg_at_init; patched on first use.
    static func_t _atomic_xchg_at_func;

    // Resolves the accessor, patches the pointer, and performs the exchange.
    static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);

    static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
      assert_access_thread_state();
      return _atomic_xchg_at_func(base, offset, new_value);
    }
  };
529
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    // Initially points to arraycopy_init; patched on first use.
    static func_t _arraycopy_func;

    // Resolves the accessor, patches the pointer, and performs the copy.
    static OopCopyResult arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                        size_t length);

    // Returns OopCopyResult::ok on success, or the reason the copy failed.
    static inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                          arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                          size_t length) {
      assert_access_thread_state();
      return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                             dst_obj, dst_offset_in_bytes, dst_raw,
                             length);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    // Initially points to clone_init; patched on first use.
    static func_t _clone_func;

    // Resolves the accessor, patches the pointer, and performs the clone.
    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      assert_access_thread_state();
      _clone_func(src, dst, size);
    }
  };
561
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
    // Initially points to value_copy_init; patched on first use.
    static func_t _value_copy_func;

    // Resolves the accessor, patches the pointer, and performs the copy.
    static void value_copy_init(const ValuePayload& src, const ValuePayload& dst);

    // NOTE(review): unlike every other dispatch wrapper in this file, this one
    // does not call assert_access_thread_state() — confirm whether that
    // omission is intentional.
    static inline void value_copy(const ValuePayload& src, const ValuePayload& dst) {
      _value_copy_func(src, dst);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_VALUE_STORE_NULL>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_VALUE_STORE_NULL>::type func_t;
    // Initially points to value_store_null_init; patched on first use.
    static func_t _value_store_null_func;

    // Resolves the accessor, patches the pointer, and performs the null store.
    static void value_store_null_init(const ValuePayload& dst);

    // NOTE(review): also missing assert_access_thread_state(); see value_copy.
    static inline void value_store_null(const ValuePayload& dst) {
      _value_store_null_func(dst);
    }
  };
585
  // Initialize the function pointers to point to the resolving function.
  // The first call through each pointer resolves the appropriate accessor and
  // patches the pointer, so subsequent calls go straight to the resolved
  // function.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
  RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_VALUE_STORE_NULL>::type
  RuntimeDispatch<decorators, T, BARRIER_VALUE_STORE_NULL>::_value_store_null_func = &value_store_null_init;
634
635 // Step 3: Pre-runtime dispatching.
636 // The PreRuntimeDispatch class is responsible for filtering the barrier strength
637 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
638 // dispatch point. Otherwise it goes through a runtime check if hardwiring was
639 // not possible.
640 struct PreRuntimeDispatch: AllStatic {
    // True iff a raw access can be hardwired without consulting the runtime
    // UseCompressedOops flag: either the access is primitive, no compressed-oop
    // conversion is requested, or the compressed-oops decision is already
    // baked into the decorators.
    template<DecoratorSet decorators>
    struct CanHardwireRaw: public std::integral_constant<
      bool,
      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
    {};

    // Decorators that encode the runtime compressed-oops decision.
    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

    // Primitive (non-oop) accesses never need GC barriers in this dispatch
    // and can always be hardwired to the raw layer.
    template<DecoratorSet decorators>
    static bool is_hardwired_primitive() {
      return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
    }
655
    // AS_RAW access that can be hardwired: dispatch straight to RawAccessBarrier.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        Raw::oop_store(addr, value);
      } else {
        Raw::store(addr, value);
      }
    }

    // AS_RAW access that cannot be hardwired yet: consult UseCompressedOops
    // once, bake the decision into the decorators, and re-dispatch. The two
    // branches are kept separate so expanded_decorators stays a constant
    // expression usable as a template argument.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      }
    }

    // Non-raw access: primitives are hardwired to raw; oop accesses go
    // through RuntimeDispatch to pick up the GC-required barriers.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store(void* addr, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
      }
    }
692
    // AS_RAW store_at: compute the field address and forward to the raw store.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }

    // Non-raw store_at: primitives are hardwired to raw; oop stores go
    // through RuntimeDispatch to pick up GC barriers.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
      }
    }
711
    // AS_RAW load that can be hardwired: dispatch straight to RawAccessBarrier.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::template oop_load<T>(addr);
      } else {
        return Raw::template load<T>(addr);
      }
    }

    // AS_RAW load that cannot be hardwired yet: consult UseCompressedOops
    // once, bake the decision into the decorators, and re-dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      }
    }

    // Non-raw load: primitives are hardwired to raw; oop loads go through
    // RuntimeDispatch to pick up GC barriers.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load(void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
      }
    }
748
    // AS_RAW load_at: compute the field address and forward to the raw load.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }

    // Non-raw load_at: primitives are hardwired to raw; oop loads go through
    // RuntimeDispatch to pick up GC barriers.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
      }
    }
767
// Raw cmpxchg that can be fully resolved at compile time: go straight to the
// raw barrier, choosing the oop or primitive flavour by decorator. Both
// branches of the (compile-time constant) condition must compile.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
atomic_cmpxchg(void* addr, T compare_value, T new_value) {
  typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
  if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
    return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
  } else {
    return Raw::atomic_cmpxchg(addr, compare_value, new_value);
  }
}
779
// Raw cmpxchg where compressed-oop usage is not statically known: probe
// UseCompressedOops at runtime and re-dispatch with the decorator set
// expanded so the target overload can hardwire the access.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
atomic_cmpxchg(void* addr, T compare_value, T new_value) {
  if (UseCompressedOops) {
    const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
  } else {
    const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
  }
}
792
// Non-raw cmpxchg: hardwired primitives get AS_RAW added and are
// re-dispatched; otherwise the runtime-selected barrier set is used.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  !HasDecorator<decorators, AS_RAW>::value, T>::type
atomic_cmpxchg(void* addr, T compare_value, T new_value) {
  if (is_hardwired_primitive<decorators>()) {
    const DecoratorSet expanded_decorators = decorators | AS_RAW;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
  } else {
    return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
  }
}
804
// Raw cmpxchg at (base, offset): resolve the field address and forward to
// the address-based cmpxchg dispatch.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value, T>::type
atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
}
811
// Non-raw cmpxchg at (base, offset): hardwired primitives are re-dispatched
// with AS_RAW added; otherwise the runtime-selected barrier performs the
// access (and resolves the address itself).
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  !HasDecorator<decorators, AS_RAW>::value, T>::type
atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  if (is_hardwired_primitive<decorators>()) {
    const DecoratorSet expanded_decorators = decorators | AS_RAW;
    return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
  } else {
    return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
  }
}
823
// Raw xchg that can be fully resolved at compile time: go straight to the
// raw barrier, choosing the oop or primitive flavour by decorator.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
atomic_xchg(void* addr, T new_value) {
  typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
  if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
    return Raw::oop_atomic_xchg(addr, new_value);
  } else {
    return Raw::atomic_xchg(addr, new_value);
  }
}
835
// Raw xchg where compressed-oop usage is not statically known: probe
// UseCompressedOops at runtime and re-dispatch with the decorator set
// expanded so the target overload can hardwire the access.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
atomic_xchg(void* addr, T new_value) {
  if (UseCompressedOops) {
    const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  } else {
    const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  }
}
848
// Non-raw xchg: hardwired primitives get AS_RAW added and are
// re-dispatched; otherwise the runtime-selected barrier set is used.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  !HasDecorator<decorators, AS_RAW>::value, T>::type
atomic_xchg(void* addr, T new_value) {
  if (is_hardwired_primitive<decorators>()) {
    const DecoratorSet expanded_decorators = decorators | AS_RAW;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
  } else {
    return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
  }
}
860
// Raw xchg at (base, offset): resolve the field address and forward to the
// address-based xchg dispatch.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value, T>::type
atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  return atomic_xchg<decorators>(field_addr(base, offset), new_value);
}
867
868 template <DecoratorSet decorators, typename T>
869 inline static typename EnableIf<
870 !HasDecorator<decorators, AS_RAW>::value, T>::type
871 atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
872 if (is_hardwired_primitive<decorators>()) {
873 const DecoratorSet expanded_decorators = decorators | AS_RAW;
874 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
875 } else {
876 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
877 }
878 }
879
// Raw arraycopy that can be fully resolved at compile time: dispatch to the
// raw barrier's oop or primitive copy. Raw copies perform no checkcast or
// null checking, so the result is unconditionally OopCopyResult::ok.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, OopCopyResult>::type
arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
          arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
          size_t length) {
  typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
  if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
    Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                       dst_obj, dst_offset_in_bytes, dst_raw,
                       length);
  } else {
    Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                   dst_obj, dst_offset_in_bytes, dst_raw,
                   length);
  }

  return OopCopyResult::ok;
}
899
// Raw arraycopy where compressed-oop usage is not statically known: probe
// UseCompressedOops at runtime and re-dispatch with the decorator set
// expanded so the target overload can hardwire the copy.
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, OopCopyResult>::type
arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
          arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
          size_t length) {
  if (UseCompressedOops) {
    const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                              dst_obj, dst_offset_in_bytes, dst_raw,
                                                              length);
  } else {
    const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                              dst_obj, dst_offset_in_bytes, dst_raw,
                                                              length);
  }
}
918
// Non-raw arraycopy: hardwired primitives get AS_RAW added and are
// re-dispatched; otherwise the runtime-selected barrier set performs the
// copy (possibly with checkcast/null checks, reflected in the result).
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
  !HasDecorator<decorators, AS_RAW>::value, OopCopyResult>::type
arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
          arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
          size_t length) {
  if (is_hardwired_primitive<decorators>()) {
    const DecoratorSet expanded_decorators = decorators | AS_RAW;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                              dst_obj, dst_offset_in_bytes, dst_raw,
                                                              length);
  } else {
    return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                        dst_obj, dst_offset_in_bytes, dst_raw,
                                                                        length);
  }
}
936
// Raw clone: dispatch directly to the raw barrier.
template <DecoratorSet decorators>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value>::type
clone(oop src, oop dst, size_t size) {
  typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
  Raw::clone(src, dst, size);
}
944
// Non-raw clone: dispatch through the runtime-selected barrier set.
template <DecoratorSet decorators>
inline static typename EnableIf<
  !HasDecorator<decorators, AS_RAW>::value>::type
clone(oop src, oop dst, size_t size) {
  RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
}
951
// Raw value_copy (flat value payloads): dispatch directly to the raw barrier.
template <DecoratorSet decorators>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value>::type
value_copy(const ValuePayload& src, const ValuePayload& dst) {
  typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
  Raw::value_copy(src, dst);
}
959
960 template <DecoratorSet decorators>
961 inline static typename EnableIf<
962 !HasDecorator<decorators, AS_RAW>::value>::type
963 value_copy(const ValuePayload& src, const ValuePayload& dst) {
964 const DecoratorSet expanded_decorators = decorators;
965 RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst);
966 }
967
// Raw value_store_null (write the null/default payload): dispatch directly
// to the raw barrier.
template <DecoratorSet decorators>
inline static typename EnableIf<
  HasDecorator<decorators, AS_RAW>::value>::type
value_store_null(const ValuePayload& dst) {
  typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
  Raw::value_store_null(dst);
}
975
976 template <DecoratorSet decorators>
977 inline static typename EnableIf<
978 !HasDecorator<decorators, AS_RAW>::value>::type
979 value_store_null(const ValuePayload& dst) {
980 const DecoratorSet expanded_decorators = decorators;
981 RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_STORE_NULL>::value_store_null(dst);
982 }
983 };
984
985 // Step 2: Reduce types.
986 // Enforce that for non-oop types, T and P have to be strictly the same.
987 // P is the type of the address and T is the type of the values.
// As for oop types, it is allowed to send T in {narrowOop, oop} and
989 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
990 // the subsequent table. (columns are P, rows are T)
991 // | | HeapWord | oop | narrowOop |
992 // | oop | rt-comp | hw-none | hw-comp |
993 // | narrowOop | x | x | hw-none |
994 //
995 // x means not allowed
996 // rt-comp means it must be checked at runtime whether the oop is compressed.
997 // hw-none means it is statically known the oop will not be compressed.
998 // hw-comp means it is statically known the oop will be compressed.
999
// Generic store: T is the same for address and value, so no compressed-oop
// conversion decorators are needed.
template <DecoratorSet decorators, typename T>
inline void store_reduce_types(T* addr, T value) {
  PreRuntimeDispatch::store<decorators>(addr, value);
}
1004
1005 template <DecoratorSet decorators>
1006 inline void store_reduce_types(narrowOop* addr, oop value) {
1007 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1008 INTERNAL_RT_USE_COMPRESSED_OOPS;
1009 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1010 }
1011
1012 template <DecoratorSet decorators>
1013 inline void store_reduce_types(narrowOop* addr, narrowOop value) {
1014 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1015 INTERNAL_RT_USE_COMPRESSED_OOPS;
1016 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1017 }
1018
1019 template <DecoratorSet decorators>
1020 inline void store_reduce_types(HeapWord* addr, oop value) {
1021 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1022 PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1023 }
1024
// Generic cmpxchg: T is the same for address and values, so no
// compressed-oop conversion decorators are needed.
template <DecoratorSet decorators, typename T>
inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
  return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
}
1029
1030 template <DecoratorSet decorators>
1031 inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
1032 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1033 INTERNAL_RT_USE_COMPRESSED_OOPS;
1034 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
1035 }
1036
1037 template <DecoratorSet decorators>
1038 inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
1039 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1040 INTERNAL_RT_USE_COMPRESSED_OOPS;
1041 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
1042 }
1043
1044 template <DecoratorSet decorators>
1045 inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
1046 oop compare_value,
1047 oop new_value) {
1048 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1049 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
1050 }
1051
1052 template <DecoratorSet decorators, typename T>
1053 inline T atomic_xchg_reduce_types(T* addr, T new_value) {
1054 const DecoratorSet expanded_decorators = decorators;
1055 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1056 }
1057
1058 template <DecoratorSet decorators>
1059 inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
1060 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1061 INTERNAL_RT_USE_COMPRESSED_OOPS;
1062 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1063 }
1064
1065 template <DecoratorSet decorators>
1066 inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
1067 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1068 INTERNAL_RT_USE_COMPRESSED_OOPS;
1069 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1070 }
1071
1072 template <DecoratorSet decorators>
1073 inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
1074 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1075 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1076 }
1077
// Generic load: T is the same for address and result, so no compressed-oop
// conversion decorators are needed.
template <DecoratorSet decorators, typename T>
inline T load_reduce_types(T* addr) {
  return PreRuntimeDispatch::load<decorators, T>(addr);
}
1082
1083 template <DecoratorSet decorators, typename T>
1084 inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
1085 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1086 INTERNAL_RT_USE_COMPRESSED_OOPS;
1087 return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
1088 }
1089
1090 template <DecoratorSet decorators, typename T>
1091 inline oop load_reduce_types(HeapWord* addr) {
1092 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1093 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1094 }
1095
// Generic arraycopy: element type T is the same on both sides, so no
// compressed-oop conversion decorators are needed.
template <DecoratorSet decorators, typename T>
inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                            size_t length) {
  return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}
1104
1105 template <DecoratorSet decorators>
1106 inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
1107 arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
1108 size_t length) {
1109 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1110 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1111 dst_obj, dst_offset_in_bytes, dst_raw,
1112 length);
1113 }
1114
1115 template <DecoratorSet decorators>
1116 inline OopCopyResult arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
1117 arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
1118 size_t length) {
1119 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1120 INTERNAL_RT_USE_COMPRESSED_OOPS;
1121 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
1122 dst_obj, dst_offset_in_bytes, dst_raw,
1123 length);
1124 }
1125
1126 // Step 1: Set default decorators. This step remembers if a type was volatile
1127 // and then sets the MO_RELAXED decorator by default. Otherwise, a default
1128 // memory ordering is set for the access, and the implied decorator rules
1129 // are applied to select sensible defaults for decorators that have not been
1130 // explicitly set. For example, default object referent strength is set to strong.
1131 // This step also decays the types passed in (e.g. getting rid of CV qualifiers
// and references from the types). This step also performs some type verification
1133 // that the passed in types make sense.
1134
// Compile-time sanity check that T is an acceptable value type for a
// primitive access: pointers, integrals and floating point are allowed;
// oop accesses are exempt because they have already been validated.
template <DecoratorSet decorators, typename T>
static void verify_types(){
  // If this fails to compile, then you have sent in something that is
  // not recognized as a valid primitive type to a primitive Access function.
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                 (std::is_pointer<T>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // not allowed primitive type
}
1143
// Entry point for address-based stores: verifies the value type, decays the
// address/value types, defaults the memory ordering, and forwards to the
// type-reduction step.
template <DecoratorSet decorators, typename P, typename T>
inline void store(P* addr, T value) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::decay_t<T>;
  DecayedT decayed_value = value;
  // If a volatile address is passed in but no memory ordering decorator,
  // set the memory ordering to MO_RELAXED by default.
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_RELAXED | decorators) : decorators>::value;
  // const_cast strips any volatile/const qualification carried by P.
  store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
}
1157
// Entry point for (base, offset) stores: verifies the value type, expands
// the decorators with sensible defaults, and remembers compressed-oop
// awareness for oop values.
template <DecoratorSet decorators, typename T>
inline void store_at(oop base, ptrdiff_t offset, T value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT decayed_value = value;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
}
1168
// Entry point for address-based loads: verifies the value type, selects the
// effective result type (oop accesses may reduce to oop or narrowOop),
// defaults the memory ordering, and forwards to the type-reduction step.
template <DecoratorSet decorators, typename P, typename T>
inline T load(P* addr) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                      typename OopOrNarrowOop<T>::type,
                                      std::decay_t<T>>;
  // If a volatile address is passed in but no memory ordering decorator,
  // set the memory ordering to MO_RELAXED by default.
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (std::is_volatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_RELAXED | decorators) : decorators>::value;
  return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
}
1183
// Entry point for (base, offset) loads: verifies the value type, selects the
// effective result type, and expands the decorators with defaults plus
// compressed-oop awareness for oop values.
template <DecoratorSet decorators, typename T>
inline T load_at(oop base, ptrdiff_t offset) {
  verify_types<decorators, T>();
  using DecayedT = std::conditional_t<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                      typename OopOrNarrowOop<T>::type,
                                      std::decay_t<T>>;
  // Expand the decorators (figure out sensible defaults)
  // Potentially remember if we need compressed oop awareness
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
}
1197
// Entry point for address-based cmpxchg: verifies the value type, decays the
// types, defaults the memory ordering to MO_SEQ_CST, and forwards to the
// type-reduction step.
template <DecoratorSet decorators, typename P, typename T>
inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  DecayedT compare_decayed_value = compare_value;
  // Default the memory ordering to MO_SEQ_CST if none was requested.
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_SEQ_CST | decorators) : decorators>::value;
  return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                          compare_decayed_value,
                                                          new_decayed_value);
}
1212
// Entry point for (base, offset) cmpxchg: verifies the value type, defaults
// the memory ordering to MO_SEQ_CST, and adds compressed-oop awareness for
// oop values before dispatching.
template <DecoratorSet decorators, typename T>
inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  DecayedT compare_decayed_value = compare_value;
  // Determine default memory ordering
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_SEQ_CST | decorators) : decorators>::value;
  // Potentially remember that we need compressed oop awareness
  const DecoratorSet final_decorators = expanded_decorators |
                                        (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                         INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
  return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
                                                                 new_decayed_value);
}
1230
// Entry point for address-based xchg: verifies the value type, decays the
// types, forces MO_SEQ_CST (the only supported flavour), and forwards to
// the type-reduction step.
template <DecoratorSet decorators, typename P, typename T>
inline T atomic_xchg(P* addr, T new_value) {
  verify_types<decorators, T>();
  using DecayedP = std::decay_t<P>;
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
  return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
                                                       new_decayed_value);
}
1242
// Entry point for (base, offset) xchg: verifies the value type, forces
// MO_SEQ_CST, and adds compressed-oop awareness for oop values before
// dispatching.
template <DecoratorSet decorators, typename T>
inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
  verify_types<decorators, T>();
  using DecayedT = std::decay_t<T>;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
  return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
}
1254
// Entry point for arraycopy: verifies the element type (void is also
// allowed, for type-erased copies), marks the access as an in-heap array
// access, and forwards to the type-reduction step.
template <DecoratorSet decorators, typename T>
inline OopCopyResult arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length) {
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                 (std::is_same<T, void>::value || std::is_integral<T>::value) ||
                 std::is_floating_point<T>::value)); // arraycopy allows type erased void elements
  using DecayedT = std::decay_t<T>;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
  // const_cast: the reduction/dispatch layers take non-const element
  // pointers; the source is still only read from.
  return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                     dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                     length);
}
1268
1269 template <DecoratorSet decorators>
1270 inline void clone(oop src, oop dst, size_t size) {
1271 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1272 PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
1273 }
1274
1275 template <DecoratorSet decorators>
1276 inline void value_copy(const ValuePayload& src, const ValuePayload& dst) {
1277 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1278 PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst);
1279 }
1280
1281 template <DecoratorSet decorators>
1282 static inline void value_store_null(const ValuePayload& dst) {
1283 const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1284 PreRuntimeDispatch::value_store_null<expanded_decorators>(dst);
1285 }
1286
// Infer the type that should be returned from an Access::oop_load.
// The proxy defers the actual load until the use site implicitly converts
// it, so the conversion operator chosen by the caller picks the result type
// (oop or narrowOop) and thereby the correct load instantiation.
template <typename P, DecoratorSet decorators>
class OopLoadProxy: public StackObj {
private:
  P *const _addr;  // address the deferred load will read from
public:
  explicit OopLoadProxy(P* addr) : _addr(addr) {}

  // Conversion to oop performs the load with an oop result type.
  inline operator oop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
  }

  // Conversion to narrowOop performs the load with a narrowOop result type.
  inline operator narrowOop() {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
  }

  // Comparisons load with the result type inferred from the other operand.
  template <typename T>
  inline bool operator ==(const T& other) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
  }

  template <typename T>
  inline bool operator !=(const T& other) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
  }

  // nullptr comparisons load as oop and compare against null.
  inline bool operator ==(std::nullptr_t) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) == nullptr;
  }

  inline bool operator !=(std::nullptr_t) const {
    return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr) != nullptr;
  }
};
1321
// Infer the type that should be returned from an Access::load_at.
// The proxy stores (base, offset) and defers the load until the use site
// implicitly converts it, letting the templated conversion operator pick
// the result type.
template <DecoratorSet decorators>
class LoadAtProxy: public StackObj {
private:
  const oop _base;           // object holding the field
  const ptrdiff_t _offset;   // byte offset of the field within _base
public:
  LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

  // Conversion performs the deferred load with the requested result type T.
  template <typename T>
  inline operator T() const {
    return load_at<decorators, T>(_base, _offset);
  }

  // Comparisons load with the result type inferred from the other operand.
  template <typename T>
  inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

  template <typename T>
  inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
};
1342
// Infer the type that should be returned from an Access::oop_load_at.
// Like LoadAtProxy, but restricted to oop results: the conversion operator
// chosen by the caller picks oop or narrowOop as the load's result type.
template <DecoratorSet decorators>
class OopLoadAtProxy: public StackObj {
private:
  const oop _base;           // object holding the field
  const ptrdiff_t _offset;   // byte offset of the field within _base
public:
  OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

  // Conversion to oop performs the load with an oop result type.
  inline operator oop() const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
  }

  // Conversion to narrowOop performs the load with a narrowOop result type.
  inline operator narrowOop() const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
  }

  // Comparisons load with the result type inferred from the other operand.
  template <typename T>
  inline bool operator ==(const T& other) const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
  }

  template <typename T>
  inline bool operator !=(const T& other) const {
    return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
  }
};
1370 }
1371
1372 #endif // SHARE_OOPS_ACCESSBACKEND_HPP