1 /*
2 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_INSTANCEKLASS_INLINE_HPP
26 #define SHARE_OOPS_INSTANCEKLASS_INLINE_HPP
27
28 #include "oops/instanceKlass.hpp"
29
30 #include "memory/memRegion.hpp"
31 #include "oops/fieldInfo.inline.hpp"
32 #include "oops/klass.inline.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "runtime/atomicAccess.hpp"
35 #include "utilities/devirtualizer.inline.hpp"
36 #include "utilities/globalDefinitions.hpp"
37
38 inline intptr_t* InstanceKlass::start_of_itable() const { return (intptr_t*)start_of_vtable() + vtable_length(); }
39 inline intptr_t* InstanceKlass::end_of_itable() const { return start_of_itable() + itable_length(); }
40
// The java.lang.Class mirror oop is the base used for static field access.
inline oop InstanceKlass::static_field_base_raw() { return java_mirror(); }

// Name and signature Symbols of the field at the given index, resolved
// through this klass's constant pool.
inline Symbol* InstanceKlass::field_name(int index) const { return field(index).name(constants()); }
inline Symbol* InstanceKlass::field_signature(int index) const { return field(index).signature(constants()); }

// Field counts decoded from this klass's field info stream.
inline int InstanceKlass::java_fields_count() const { return FieldInfoStream::num_java_fields(fieldinfo_stream()); }
inline int InstanceKlass::total_fields_count() const { return FieldInfoStream::num_total_fields(fieldinfo_stream()); }
48
49 inline OopMapBlock* InstanceKlass::start_of_nonstatic_oop_maps() const {
50 return (OopMapBlock*)(start_of_itable() + itable_length());
51 }
52
53 inline Klass** InstanceKlass::end_of_nonstatic_oop_maps() const {
54 return (Klass**)(start_of_nonstatic_oop_maps() +
55 nonstatic_oop_map_count());
56 }
57
58 inline InstanceKlass* volatile* InstanceKlass::adr_implementor() const {
59 if (is_interface()) {
60 return (InstanceKlass* volatile*)end_of_nonstatic_oop_maps();
61 } else {
62 return nullptr;
63 }
64 }
65
66 inline InlineKlass* InstanceKlass::get_inline_type_field_klass(int idx) const {
67 assert(has_inline_type_fields(), "Sanity checking");
68 assert(idx < java_fields_count(), "IOOB");
69 InlineKlass* k = inline_layout_info(idx).klass();
70 assert(k != nullptr, "Should always be set before being read");
71 return k;
72 }
73
74 inline InlineKlass* InstanceKlass::get_inline_type_field_klass_or_null(int idx) const {
75 assert(has_inline_type_fields(), "Sanity checking");
76 assert(idx < java_fields_count(), "IOOB");
77 InlineKlass* k = inline_layout_info(idx).klass();
78 return k;
79 }
80
// Accessors for _array_klasses using release/acquire ordering: a reader that
// observes a non-null ObjArrayKlass through array_klasses_acquire() also
// observes all stores made before release_set_array_klasses() published it.
inline ObjArrayKlass* InstanceKlass::array_klasses_acquire() const {
  return AtomicAccess::load_acquire(&_array_klasses);
}

inline void InstanceKlass::release_set_array_klasses(ObjArrayKlass* k) {
  AtomicAccess::release_store(&_array_klasses, k);
}
88
89 // The iteration over the oops in objects is a hot path in the GC code.
90 // By force inlining the following functions, we get similar GC performance
91 // as the previous macro based implementation.
92
93 template <typename T, class OopClosureType>
94 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
95 T* p = obj->field_addr<T>(map->offset());
96 T* const end = p + map->count();
97
98 for (; p < end; ++p) {
99 Devirtualizer::do_oop(closure, p);
100 }
101 }
102
103 template <typename T, class OopClosureType>
104 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
105 T* const start = obj->field_addr<T>(map->offset());
106 T* p = start + map->count();
107
108 while (start < p) {
109 --p;
110 Devirtualizer::do_oop(closure, p);
111 }
112 }
113
114 template <typename T, class OopClosureType>
115 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
116 T* p = obj->field_addr<T>(map->offset());
117 T* end = p + map->count();
118
119 T* const l = (T*)mr.start();
120 T* const h = (T*)mr.end();
121 assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
122 mask_bits((intptr_t)h, sizeof(T)-1) == 0,
123 "bounded region must be properly aligned");
124
125 if (p < l) {
126 p = l;
127 }
128 if (end > h) {
129 end = h;
130 }
131
132 for (;p < end; ++p) {
133 Devirtualizer::do_oop(closure, p);
134 }
135 }
136
137 template <typename T, class OopClosureType>
138 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
139 OopMapBlock* map = start_of_nonstatic_oop_maps();
140 OopMapBlock* const end_map = map + nonstatic_oop_map_count();
141
142 for (; map < end_map; ++map) {
143 oop_oop_iterate_oop_map<T>(map, obj, closure);
144 }
145 }
146
147 template <typename T, class OopClosureType>
148 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
149 OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
150 OopMapBlock* map = start_map + nonstatic_oop_map_count();
151
152 while (start_map < map) {
153 --map;
154 oop_oop_iterate_oop_map_reverse<T>(map, obj, closure);
155 }
156 }
157
158 template <typename T, class OopClosureType>
159 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
160 OopMapBlock* map = start_of_nonstatic_oop_maps();
161 OopMapBlock* const end_map = map + nonstatic_oop_map_count();
162
163 for (;map < end_map; ++map) {
164 oop_oop_iterate_oop_map_bounded<T>(map, obj, closure, mr);
165 }
166 }
167
168 template <typename T, class OopClosureType>
169 ALWAYSINLINE void InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
170 if (Devirtualizer::do_metadata(closure)) {
171 Devirtualizer::do_klass(closure, this);
172 }
173
174 oop_oop_iterate_oop_maps<T>(obj, closure);
175 }
176
// Reverse-order oop iteration entry point. Visiting metadata is not
// implemented on this path, so closures requesting it are rejected in
// debug builds.
template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  assert(!Devirtualizer::do_metadata(closure),
         "Code to handle metadata is not implemented");

  oop_oop_iterate_oop_maps_reverse<T>(obj, closure);
}
184
185 template <typename T, class OopClosureType>
186 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
187 if (Devirtualizer::do_metadata(closure)) {
188 if (mr.contains(obj)) {
189 Devirtualizer::do_klass(closure, this);
190 }
191 }
192
193 oop_oop_iterate_oop_maps_bounded<T>(obj, closure, mr);
194 }
195
196 #endif // SHARE_OOPS_INSTANCEKLASS_INLINE_HPP