/*
 * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/matcher.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "utilities/checkedCast.hpp"

//=============================================================================
// If input is already higher or equal to cast type, then this is an identity.
Node* ConstraintCastNode::Identity(PhaseGVN* phase) {
  if (_dependency == UnconditionalDependency) {
    return this;
  }
  Node* dom = dominating_cast(phase, phase);
  if (dom != nullptr) {
    return dom;
  }
  return higher_equal_types(phase, in(1)) ? in(1) : this;
}

//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type
const Type* ConstraintCastNode::Value(PhaseGVN* phase) const {
  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;

  const Type* in_type = phase->type(in(1));
  const Type* ft = in_type->filter_speculative(_type);

  // Check if both _type and in_type had a speculative type, but for the just
  // computed ft the speculative type was dropped.
  if (ft->speculative() == nullptr &&
      _type->speculative() != nullptr &&
      in_type->speculative() != nullptr) {
    // Speculative type may have disagreed between cast and input, and was
    // dropped in filtering. Recompute so that ft can take speculative type
    // of in_type. If we did not do it now, a subsequent ::Value call would
    // do it, and violate idempotence of ::Value.
    ft = in_type->filter_speculative(ft);
  }

#ifdef ASSERT
  // Previous versions of this function had some special case logic,
  // which is no longer necessary.  Make sure of the required effects.
  switch (Opcode()) {
    case Op_CastII:
    {
      if (in_type == Type::TOP) {
        assert(ft == Type::TOP, "special case #1");
      }
      const Type* rt = in_type->join_speculative(_type);
      if (rt->empty()) {
        assert(ft == Type::TOP, "special case #2");
      }
      break;
    }
    case Op_CastPP:
    if (in_type == TypePtr::NULL_PTR &&
        _type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull) {
      assert(ft == Type::TOP, "special case #3");
      break;
    }
  }
#endif //ASSERT

  return ft;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (in(0) != nullptr && remove_dead_region(phase, can_reshape)) {
    return this;
  }

  // Push cast through InlineTypeNode
  InlineTypeNode* vt = in(1)->isa_InlineType();
  if (vt != nullptr && phase->type(vt)->filter_speculative(_type) != Type::TOP) {
    Node* cast = clone();
    cast->set_req(1, vt->get_oop());
    vt = vt->clone()->as_InlineType();
    if (!_type->maybe_null()) {
      vt->as_InlineType()->set_is_init(*phase);
    }
    vt->set_oop(*phase, phase->transform(cast));
    return vt;
  }

  if (in(1) != nullptr && phase->type(in(1)) != Type::TOP) {
    return TypeNode::Ideal(phase, can_reshape);
  }

  return nullptr;
}

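// Hashing and equality include the dependency kind and the extra types so that
// GVN does not common casts that differ only in those properties.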
uint ConstraintCastNode::hash() const {
  return TypeNode::hash() + (int)_dependency + (_extra_types != nullptr ? _extra_types->hash() : 0);
}

bool ConstraintCastNode::cmp(const Node &n) const {
  if (!TypeNode::cmp(n)) {
    return false;
  }
  ConstraintCastNode& cast = (ConstraintCastNode&) n;
  if (cast._dependency != _dependency) {
    return false;
  }
  if (_extra_types == nullptr || cast._extra_types == nullptr) {
    return _extra_types == cast._extra_types;
  }
  return _extra_types->eq(cast._extra_types);
}

uint ConstraintCastNode::size_of() const {
  return sizeof(*this);
}

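// Make a CastII or CastLL constrained by type 't' for the given integral basic type.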
Node* ConstraintCastNode::make_cast_for_basic_type(Node* c, Node* n, const Type* t, DependencyType dependency, BasicType bt) {
  switch(bt) {
  case T_INT:
    return new CastIINode(c, n, t, dependency);
  case T_LONG:
    return new CastLLNode(c, n, t, dependency);
  default:
    fatal("Bad basic type %s", type2name(bt));
  }
  return nullptr;
}

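// Search the other users of this cast's input for a cast of the same opcode with a type
// at least as narrow, whose control dominates this cast's control. Such a cast makes
// this one redundant.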
TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const {
  if (_dependency == UnconditionalDependency) {
    return nullptr;
  }
  Node* val = in(1);
  Node* ctl = in(0);
  int opc = Opcode();
  if (ctl == nullptr) {
    return nullptr;
  }
  // Range check CastIIs may all end up under a single range check, and
  // in that case only the narrower CastII would be kept by the code
  // below, which would be incorrect.
  if (is_CastII() && as_CastII()->has_range_check()) {
    return nullptr;
  }
  if (type()->isa_rawptr() && (gvn->type_or_null(val) == nullptr || gvn->type(val)->isa_oopptr())) {
    return nullptr;
  }
  for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
    Node* u = val->fast_out(i);
    if (u != this &&
        u->outcnt() > 0 &&
        u->Opcode() == opc &&
        u->in(0) != nullptr &&
        higher_equal_types(gvn, u)) {
      if (pt->is_dominator(u->in(0), ctl)) {
        return u->as_Type();
      }
      if (is_CheckCastPP() && u->in(1)->is_Proj() && u->in(1)->in(0)->is_Allocate() &&
          u->in(0)->is_Proj() && u->in(0)->in(0)->is_Initialize() &&
          u->in(1)->in(0)->as_Allocate()->initialization() == u->in(0)->in(0)) {
        // A CheckCastPP following an allocation always dominates all
        // uses of the allocation result.
        return u->as_Type();
      }
    }
  }
  return nullptr;
}

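// Returns true if the type of 'other' is at least as narrow as this cast's type
// and all of its extra types.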
bool ConstraintCastNode::higher_equal_types(PhaseGVN* phase, const Node* other) const {
  const Type* t = phase->type(other);
  if (!t->higher_equal_speculative(type())) {
    return false;
  }
  if (_extra_types != nullptr) {
    for (uint i = 0; i < _extra_types->cnt(); ++i) {
      if (!t->higher_equal_speculative(_extra_types->field_at(i))) {
        return false;
      }
    }
  }
  return true;
}

#ifndef PRODUCT
void ConstraintCastNode::dump_spec(outputStream *st) const {
  TypeNode::dump_spec(st);
  if (_extra_types != nullptr) {
    st->print(" extra types: ");
    _extra_types->dump_on(st);
  }
  if (_dependency != RegularDependency) {
    st->print(" %s dependency", _dependency == StrongDependency ? "strong" : "unconditional");
  }
}
#endif

const Type* CastIINode::Value(PhaseGVN* phase) const {
  const Type *res = ConstraintCastNode::Value(phase);
  if (res == Type::TOP) {
    return Type::TOP;
  }
  assert(res->isa_int(), "res must be int");

  // Similar to ConvI2LNode::Value() for the same reasons
  // see if we can remove type assertion after loop opts
  res = widen_type(phase, res, T_INT);

  return res;
}

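// Clone this cast with a new input and a new type, and return an existing identical
// node instead if IGVN already has one.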
Node* ConstraintCastNode::find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, const TypeInteger* type) const {
  Node* n = clone();
  n->set_req(1, parent);
  n->as_ConstraintCast()->set_type(type);
  Node* existing = igvn->hash_find_insert(n);
  if (existing != nullptr) {
    n->destruct(igvn);
    return existing;
  }
  return igvn->register_new_node_with_optimizer(n);
}

Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
  if (progress != nullptr) {
    return progress;
  }
  if (can_reshape && !phase->C->post_loop_opts_phase()) {
    // makes sure we run ::Value to potentially remove type assertion after loop opts
    phase->C->record_for_post_loop_opts_igvn(this);
  }
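  // Pushing a range check dependent CastII through an Add/Sub is only done once loop
  // opts are over; until then, queue the node so IGVN revisits it afterwards.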
  if (!_range_check_dependency || phase->C->post_loop_opts_phase()) {
    return optimize_integer_cast(phase, T_INT);
  }
  phase->C->record_for_post_loop_opts_igvn(this);
  return nullptr;
}

Node* CastIINode::Identity(PhaseGVN* phase) {
  Node* progress = ConstraintCastNode::Identity(phase);
  if (progress != this) {
    return progress;
  }
  return this;
}

bool CastIINode::cmp(const Node &n) const {
  return ConstraintCastNode::cmp(n) && ((CastIINode&)n)._range_check_dependency == _range_check_dependency;
}

uint CastIINode::size_of() const {
  return sizeof(*this);
}

#ifndef PRODUCT
void CastIINode::dump_spec(outputStream* st) const {
  ConstraintCastNode::dump_spec(st);
  if (_range_check_dependency) {
    st->print(" range check dependency");
  }
}
#endif

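// If this CastII carries a range check dependency, return a copy with a strong dependency
// (pinned to its control input); returns nullptr if there is nothing to pin.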
CastIINode* CastIINode::pin_array_access_node() const {
  assert(_dependency == RegularDependency, "already pinned");
  if (has_range_check()) {
    return new CastIINode(in(0), in(1), bottom_type(), StrongDependency, has_range_check());
  }
  return nullptr;
}

void CastIINode::remove_range_check_cast(Compile* C) {
  if (has_range_check()) {
    // Range check CastII nodes feed into an address computation subgraph. Remove them to let that subgraph float freely.
    // For memory access or integer division nodes that depend on the cast, record the dependency on the cast's control
    // as a precedence edge, so they can't float above the cast in case that cast's narrowed type helped eliminate a
    // range check or a null divisor check.
    assert(in(0) != nullptr, "All RangeCheck CastII must have a control dependency");
    ResourceMark rm;
    Unique_Node_List wq;
    wq.push(this);
    for (uint next = 0; next < wq.size(); ++next) {
      Node* m = wq.at(next);
      for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
        Node* use = m->fast_out(i);
        if (use->is_Mem() || use->is_div_or_mod(T_INT) || use->is_div_or_mod(T_LONG)) {
          use->ensure_control_or_add_prec(in(0));
        } else if (!use->is_CFG() && !use->is_Phi()) {
          wq.push(use);
        }
      }
    }
    subsume_by(in(1), C);
    if (outcnt() == 0) {
      disconnect_inputs(C);
    }
  }
}

const Type* CastLLNode::Value(PhaseGVN* phase) const {
  const Type* res = ConstraintCastNode::Value(phase);
  if (res == Type::TOP) {
    return Type::TOP;
  }
  assert(res->isa_long(), "res must be long");

  return widen_type(phase, res, T_LONG);
}

Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* progress = ConstraintCastNode::Ideal(phase, can_reshape);
  if (progress != nullptr) {
    return progress;
  }
  if (!phase->C->post_loop_opts_phase()) {
    // makes sure we run ::Value to potentially remove type assertion after loop opts
    phase->C->record_for_post_loop_opts_igvn(this);
  }
  // transform (CastLL (ConvI2L ..)) into (ConvI2L (CastII ..)) if the type of the CastLL is narrower than the type of
  // the ConvI2L.
  Node* in1 = in(1);
  if (in1 != nullptr && in1->Opcode() == Op_ConvI2L) {
    const Type* t = Value(phase);
    const Type* t_in = phase->type(in1);
    if (t != Type::TOP && t_in != Type::TOP) {
      const TypeLong* tl = t->is_long();
      const TypeLong* t_in_l = t_in->is_long();
      assert(tl->_lo >= t_in_l->_lo && tl->_hi <= t_in_l->_hi, "CastLL type should be narrower than or equal to the type of its input");
      assert((tl != t_in_l) == (tl->_lo > t_in_l->_lo || tl->_hi < t_in_l->_hi), "if type differs then this node's type must be narrower");
      if (tl != t_in_l) {
        const TypeInt* ti = TypeInt::make(checked_cast<jint>(tl->_lo), checked_cast<jint>(tl->_hi), tl->_widen);
        Node* castii = phase->transform(new CastIINode(in(0), in1->in(1), ti));
        Node* convi2l = in1->clone();
        convi2l->set_req(1, castii);
        return convi2l;
      }
    }
  }
  return optimize_integer_cast(phase, T_LONG);
}


//=============================================================================
//------------------------------Identity---------------------------------------
// If input is already higher or equal to cast type, then this is an identity.
Node* CheckCastPPNode::Identity(PhaseGVN* phase) {
  if (in(1)->is_InlineType() && _type->isa_instptr() && phase->type(in(1))->inline_klass()->is_subtype_of(_type->is_instptr()->instance_klass())) {
    return in(1);
  }
  return ConstraintCastNode::Identity(phase);
}

//------------------------------Value------------------------------------------
// Take 'join' of input and cast-up type, unless working with an Interface
const Type* CheckCastPPNode::Value(PhaseGVN* phase) const {
  if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;

  const Type *inn = phase->type(in(1));
  if (inn == Type::TOP) return Type::TOP;  // No information yet

  if (inn->isa_oopptr() && _type->isa_oopptr()) {
    return ConstraintCastNode::Value(phase);
  }

  const TypePtr *in_type = inn->isa_ptr();
  const TypePtr *my_type = _type->isa_ptr();
  const Type *result = _type;
  if (in_type != nullptr && my_type != nullptr) {
    // TODO 8302672
    if (!StressReflectiveCode && my_type->isa_aryptr() && in_type->isa_aryptr()) {
      // Propagate array properties (not flat/null-free)
      // Don't do this when StressReflectiveCode is enabled because it might lead to
      // a dying data path while the corresponding flat/null-free check is not folded.
      my_type = my_type->is_aryptr()->update_properties(in_type->is_aryptr());
      if (my_type == nullptr) {
        return Type::TOP; // Inconsistent properties
      }
    }
    TypePtr::PTR in_ptr = in_type->ptr();
    if (in_ptr == TypePtr::Null) {
      result = in_type;
    } else if (in_ptr != TypePtr::Constant) {
      result = my_type->cast_to_ptr_type(my_type->join_ptr(in_ptr));
    }
  }

  return result;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type* CastX2PNode::Value(PhaseGVN* phase) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  if (t->base() == Type_X && t->singleton()) {
    uintptr_t bits = (uintptr_t) t->is_intptr_t()->get_con();
    if (bits == 0)   return TypePtr::NULL_PTR;
    return TypeRawPtr::make((address) bits);
  }
  return CastX2PNode::bottom_type();
}


//------------------------------Idealize---------------------------------------
static inline bool fits_in_int(const Type* t, bool but_not_min_int = false) {
  if (t == Type::TOP)  return false;
  const TypeX* tl = t->is_intptr_t();
  jint lo = min_jint;
  jint hi = max_jint;
  if (but_not_min_int)  ++lo;  // caller wants to negate the value w/o overflow
  return (tl->_lo >= lo) && (tl->_hi <= hi);
}

static inline Node* addP_of_X2P(PhaseGVN *phase,
                                Node* base,
                                Node* dispX,
                                bool negate = false) {
  if (negate) {
    dispX = phase->transform(new SubXNode(phase->MakeConX(0), dispX));
  }
  return new AddPNode(phase->C->top(),
                      phase->transform(new CastX2PNode(base)),
                      dispX);
}

Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // convert CastX2P(AddX(x, y)) to AddP(CastX2P(x), y) if y fits in an int
  int op = in(1)->Opcode();
  Node* x;
  Node* y;
  switch (op) {
    case Op_SubX:
    x = in(1)->in(1);
    // Avoid ideal transformations ping-ponging between this node and AddP for raw pointers.
    if (phase->find_intptr_t_con(x, -1) == 0)
      break;
    y = in(1)->in(2);
    if (fits_in_int(phase->type(y), true)) {
      return addP_of_X2P(phase, x, y, true);
    }
    break;
    case Op_AddX:
    x = in(1)->in(1);
    y = in(1)->in(2);
    if (fits_in_int(phase->type(y))) {
      return addP_of_X2P(phase, x, y);
    }
    if (fits_in_int(phase->type(x))) {
      return addP_of_X2P(phase, y, x);
    }
    break;
  }
  return nullptr;
}


//------------------------------Identity---------------------------------------
Node* CastX2PNode::Identity(PhaseGVN* phase) {
  if (in(1)->Opcode() == Op_CastP2X)  return in(1)->in(1);
  return this;
}

//=============================================================================
//------------------------------Value------------------------------------------
const Type* CastP2XNode::Value(PhaseGVN* phase) const {
  const Type* t = phase->type(in(1));
  if (t == Type::TOP) return Type::TOP;
  if (t->base() == Type::RawPtr && t->singleton()) {
    uintptr_t bits = (uintptr_t) t->is_rawptr()->get_con();
    return TypeX::make(bits);
  }

  if (t->is_zero_type() || !t->maybe_null()) {
    for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
      Node* u = fast_out(i);
      if (u->Opcode() == Op_OrL) {
        for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
          Node* cmp = u->fast_out(j);
          if (cmp->Opcode() == Op_CmpL) {
            // Give CmpL a chance to get optimized
            phase->record_for_igvn(cmp);
          }
        }
      }
    }
  }

  return CastP2XNode::bottom_type();
}

Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return (in(0) && remove_dead_region(phase, can_reshape)) ? this : nullptr;
}

//------------------------------Identity---------------------------------------
Node* CastP2XNode::Identity(PhaseGVN* phase) {
  if (in(1)->Opcode() == Op_CastX2P)  return in(1)->in(1);
  return this;
}

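// Create the cast node matching the category of 'type': int, long, half float, float,
// double, vector or pointer.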
Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency,
                                             const TypeTuple* types) {
  if (type->isa_int()) {
    return new CastIINode(c, in, type, dependency, false, types);
  } else if (type->isa_long()) {
    return new CastLLNode(c, in, type, dependency, types);
  } else if (type->isa_half_float()) {
    return new CastHHNode(c, in, type, dependency, types);
  } else if (type->isa_float()) {
    return new CastFFNode(c, in, type, dependency, types);
  } else if (type->isa_double()) {
    return new CastDDNode(c, in, type, dependency, types);
  } else if (type->isa_vect()) {
    return new CastVVNode(c, in, type, dependency, types);
  } else if (type->isa_ptr()) {
    return new CastPPNode(c, in, type, dependency, types);
  }
  fatal("unreachable. Invalid cast type.");
  return nullptr;
}

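// If the input is an Add or Sub over which push_thru_add() can split the constrained range,
// push the cast down to the operands and rebuild the Add/Sub from the casted inputs.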
Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) {
  PhaseIterGVN *igvn = phase->is_IterGVN();
  const TypeInteger* this_type = this->type()->is_integer(bt);
  Node* z = in(1);
  const TypeInteger* rx = nullptr;
  const TypeInteger* ry = nullptr;
  // Similar to ConvI2LNode::Ideal() for the same reasons
  if (Compile::push_thru_add(phase, z, this_type, rx, ry, bt, bt)) {
    if (igvn == nullptr) {
      // Postpone this optimization to iterative GVN, where we can handle deep
      // AddI chains without an exponential number of recursive Ideal() calls.
      phase->record_for_igvn(this);
      return nullptr;
    }
    int op = z->Opcode();
    Node* x = z->in(1);
    Node* y = z->in(2);

    Node* cx = find_or_make_integer_cast(igvn, x, rx);
    Node* cy = find_or_make_integer_cast(igvn, y, ry);
    if (op == Op_Add(bt)) {
      return AddNode::make(cx, cy, bt);
    } else {
      assert(op == Op_Sub(bt), "");
      return SubNode::make(cx, cy, bt);
    }
    return nullptr;
  }
  return nullptr;
}

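// Once loop opts are over, weaken the asserted range to its sign information only
// (>= 0 or < 0), intersected with the input type, so that the remaining type assertion
// can potentially be removed (see CastIINode::Value and CastLLNode::Value).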
const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const {
  if (!phase->C->post_loop_opts_phase()) {
    return res;
  }
  const TypeInteger* this_type = res->is_integer(bt);
  const TypeInteger* in_type = phase->type(in(1))->isa_integer(bt);
  if (in_type != nullptr &&
      (in_type->lo_as_long() != this_type->lo_as_long() ||
       in_type->hi_as_long() != this_type->hi_as_long())) {
    jlong lo1 = this_type->lo_as_long();
    jlong hi1 = this_type->hi_as_long();
    int w1 = this_type->_widen;
    if (lo1 >= 0) {
      // Keep a range assertion of >=0.
      lo1 = 0;        hi1 = max_signed_integer(bt);
    } else if (hi1 < 0) {
      // Keep a range assertion of <0.
      lo1 = min_signed_integer(bt); hi1 = -1;
    } else {
      lo1 = min_signed_integer(bt); hi1 = max_signed_integer(bt);
    }
    return TypeInteger::make(MAX2(in_type->lo_as_long(), lo1),
                             MIN2(in_type->hi_as_long(), hi1),
                             MAX2((int)in_type->_widen, w1), bt);
  }
  return res;
}
--- EOF ---