      break;

    case C1StubId::new_instance_id:
    case C1StubId::fast_new_instance_id:
    case C1StubId::fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj = rax;   // Result

        if (id == C1StubId::new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == C1StubId::fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

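    // A rough sketch (not emitted here) of how C1-compiled code is assumed to
    // reach this slow path: the inline allocation fast path is tried first and,
    // when it fails, the stub above is called with the klass in rdx and the new
    // oop comes back in rax. The call site below is hypothetical and only
    // illustrates the register contract.
    //
    //   __ movptr(rdx, klass);  // klass to allocate (assumed to be set up by LIR)
    //   __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_instance_id)));
    //   // on return: rax = new instance; the other live registers were
    //   // preserved by save_live_registers()/restore_live_registers_except_rax()
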
    case C1StubId::counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
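        // Sketch of the expected frame layout at this point (inferred from the
        // offsets used below; the caller pushes the two arguments right before
        // the call, and enter() has pushed the old rbp):
        //   rbp + 3*BytesPerWord : Method* whose counter overflowed
        //   rbp + 2*BytesPerWord : bci at which the overflow occurred
        //   rbp + 1*BytesPerWord : return address
        //   rbp + 0              : saved rbp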
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case C1StubId::new_type_array_id:
    case C1StubId::new_object_array_id:
    case C1StubId::new_null_free_array_id:
      {
        Register length = rbx; // Incoming
        Register klass = rdx;  // Incoming
        Register obj = rax;    // Result

        if (id == C1StubId::new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else if (id == C1StubId::new_object_array_id) {
          __ set_info("new_object_array", dont_gc_arguments);
        } else {
          __ set_info("new_null_free_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
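          // The array tag is stored in the high bits of the klass layout
          // helper, so the arithmetic shift by _lh_array_tag_shift below
          // leaves just the tag in t0, ready to be compared against the
          // expected type/object/flat ("vt") tag values.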
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          switch (id) {
          case C1StubId::new_type_array_id:
            __ cmpl(t0, Klass::_lh_array_tag_type_value);
            __ jcc(Assembler::equal, ok);
            __ stop("assert(is a type array klass)");
            break;
          case C1StubId::new_object_array_id:
            __ cmpl(t0, Klass::_lh_array_tag_obj_value); // new "[Ljava/lang/Object;"
            __ jcc(Assembler::equal, ok);
            __ cmpl(t0, Klass::_lh_array_tag_vt_value);  // new "[LVT;"
            __ jcc(Assembler::equal, ok);
            __ stop("assert(is an object or inline type array klass)");
            break;
          case C1StubId::new_null_free_array_id:
            __ cmpl(t0, Klass::_lh_array_tag_vt_value);  // the array can be a flat array.
            __ jcc(Assembler::equal, ok);
            __ cmpl(t0, Klass::_lh_array_tag_obj_value); // the array cannot be a flat array (due to InlineArrayElementMaxFlatSize, etc.)
            __ jcc(Assembler::equal, ok);
            __ stop("assert(is an object or inline type array klass)");
            break;
          default: ShouldNotReachHere();
          }
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == C1StubId::new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else if (id == C1StubId::new_object_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        } else {
          assert(id == C1StubId::new_null_free_array_id, "must be");
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_null_free_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case C1StubId::new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
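        // (Note: rax is reused as the result register of the runtime call
        // below, so the new multi-dimensional array comes back in rax while
        // every other live register is restored afterwards.)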
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case C1StubId::load_flat_array_id:
      {
        StubFrame f(sasm, "load_flat_array", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3);

        // Called with store_parameter and not the C ABI

        f.load_argument(1, rax); // rax: array
        f.load_argument(0, rbx); // rbx: index
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, load_flat_array), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: loaded element at array[index]
        __ verify_oop(rax);
      }
      break;

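    // Rough caller-side sketch (assumed, not part of this stub): the LIR code
    // passes the operands through outgoing stack slots via store_parameter
    // rather than C argument registers, which is what StubFrame::load_argument
    // reads back above. Roughly:
    //
    //   store_parameter(index, 0);  // read back by f.load_argument(0, rbx)
    //   store_parameter(array, 1);  // read back by f.load_argument(1, rax)
    //   call(Runtime1::entry_for(C1StubId::load_flat_array_id));
    //   // rax then holds the element loaded from array[index]
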
    case C1StubId::store_flat_array_id:
      {
        StubFrame f(sasm, "store_flat_array", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 4);

        // Called with store_parameter and not the C ABI

        f.load_argument(2, rax); // rax: array
        f.load_argument(1, rbx); // rbx: index
        f.load_argument(0, rcx); // rcx: value
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flat_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
      }
      break;

    case C1StubId::substitutability_check_id:
      {
        StubFrame f(sasm, "substitutability_check", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3);

        // Called with store_parameter and not the C ABI

        f.load_argument(1, rax); // rax: left operand
        f.load_argument(0, rbx); // rbx: right operand
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, substitutability_check), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: whether the two operands are substitutable
      }
      break;

    case C1StubId::buffer_inline_args_id:
    case C1StubId::buffer_inline_args_no_receiver_id:
      {
        const char* name = (id == C1StubId::buffer_inline_args_id) ?
          "buffer_inline_args" : "buffer_inline_args_no_receiver";
        StubFrame f(sasm, name, dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2);
        Register method = rbx;
        address entry = (id == C1StubId::buffer_inline_args_id) ?
          CAST_FROM_FN_PTR(address, buffer_inline_args) :
          CAST_FROM_FN_PTR(address, buffer_inline_args_no_receiver);
        int call_offset = __ call_RT(rax, noreg, entry, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(rax); // rax: an array of buffered value objects
      }
      break;

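    // Note on the two stubs above: rbx is assumed to hold the Method* whose
    // scalarized inline-type arguments need to be buffered (it is forwarded to
    // the runtime call unchanged), and the runtime returns, in rax, an array
    // of the newly buffered value objects; the _no_receiver variant, as its
    // name suggests, leaves the receiver argument out.
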
    case C1StubId::register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C ABI locations

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet, so it's one word away from the top of the stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case C1StubId::throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

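    // The trailing boolean passed to generate_exception_throw indicates whether
    // the runtime entry expects an argument: the array-store, class-cast and
    // identity exceptions below receive the offending object, while the entries
    // that pass false throw exceptions that need no argument.
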
    case C1StubId::throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case C1StubId::throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case C1StubId::throw_illegal_monitor_state_exception_id:
      { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
      }
      break;

    case C1StubId::throw_identity_exception_id:
      { StubFrame f(sasm, "throw_identity_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_identity_exception), true);
      }
      break;

    case C1StubId::slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
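        // SLOT2 (defined earlier in this file) is assumed to expand to the
        // extra high-word slot name on LP64 builds, where each 64-bit register
        // spans two 32-bit VMReg stack slots, and to nothing on 32-bit, so the
        // enum below yields correct slot indices for either word size.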
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off // deepest argument is also the return value