src/hotspot/share/opto/parse.hpp (old)

428   // Must this parse be aborted?
429   bool failing() const { return C->failing_internal(); } // might have cascading effects, not stressing bailouts for now.
430 
431   Block* rpo_at(int rpo) {
432     assert(0 <= rpo && rpo < _block_count, "oob");
433     return &_blocks[rpo];
434   }
435   Block* start_block() {
436     return rpo_at(flow()->start_block()->rpo());
437   }
438   // Can return null if the flow pass did not complete a block.
439   Block* successor_for_bci(int bci) {
440     return block()->successor_for_bci(bci);
441   }
442 
443  private:
444   // Create a JVMS & map for the initial state of this method.
445   SafePointNode* create_entry_map();
446 
447   // OSR helpers
448   Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
449   Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
450   void  load_interpreter_state(Node* osr_buf);
451 
452   // Functions for managing basic blocks:
453   void init_blocks();
454   void load_state_from(Block* b);
455   void store_state_to(Block* b) { b->record_state(this); }
456 
457   // Parse all the basic blocks.
458   void do_all_blocks();
459 
460   // Parse the current basic block
461   void do_one_block();
462 
463   // Raise an error if we get a bad ciTypeFlow CFG.
464   void handle_missing_successor(int bci);
465 
466   // first actions (before BCI 0)
467   void do_method_entry();
468 

474   void ensure_phis_everywhere();
475 
476   // Merge the current mapping into the basic block starting at bci
477   void merge(          int target_bci);
478   // Same as plain merge, except that it allocates a new path number.
479   void merge_new_path( int target_bci);
480   // Merge the current mapping into an exception handler.
481   void merge_exception(int target_bci);
482   // Helper: Merge the current mapping into the given basic block
483   void merge_common(Block* target, int pnum);
484   // Helper functions for merging individual cells.
485   PhiNode *ensure_phi(       int idx, bool nocreate = false);
486   PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
487   // Helper to merge the current memory state into the given basic block
488   void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
489 
490   // Parse this bytecode, and alter the Parser's JVM->Node mapping
491   void do_one_bytecode();
492 
493   // helper function to generate array store check
494   void array_store_check();
495   // Helper function to generate array load
496   void array_load(BasicType etype);
497   // Helper function to generate array store
498   void array_store(BasicType etype);
499   // Helper function to compute array addressing
500   Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
501 
502   void clinit_deopt();
503 
504   // Pass current map to exits
505   void return_current(Node* value);
506 
507   // Register finalizers on return from Object.<init>
508   void call_register_finalizer();
509 
510   // Insert a compiler safepoint into the graph
511   void add_safepoint();
512 
513   // Insert a compiler safepoint into the graph, if there is a back-branch.
514   void maybe_add_safepoint(int target_bci) {
515     if (target_bci <= bci()) {
516       add_safepoint();
517     }
518   }
519 
520   // Note:  Intrinsic generation routines may be found in library_call.cpp.

527 
528   // Helper functions for type checking bytecodes:
529   void  do_checkcast();
530   void  do_instanceof();
531 
532   // Helper functions for shifting & arithmetic
533   void modf();
534   void modd();
535   void l2f();
536 
537   // implementation of _get* and _put* bytecodes
538   void do_getstatic() { do_field_access(true,  false); }
539   void do_getfield () { do_field_access(true,  true); }
540   void do_putstatic() { do_field_access(false, false); }
541   void do_putfield () { do_field_access(false, true); }
542 
543   // common code for making initial checks and forming addresses
544   void do_field_access(bool is_get, bool is_field);
545 
546   // common code for actually performing the load or store
547   void do_get_xxx(Node* obj, ciField* field, bool is_field);
548   void do_put_xxx(Node* obj, ciField* field, bool is_field);
549 
550   // implementation of object creation bytecodes
551   void do_new();
552   void do_newarray(BasicType elemtype);
553   void do_anewarray();
554   void do_multianewarray();
555   Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);
556 
557   // implementation of jsr/ret
558   void do_jsr();
559   void do_ret();
560 
561   float   dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
562   float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
563   bool    seems_never_taken(float prob) const;
564   bool    path_is_suitable_for_uncommon_trap(float prob) const;
565 
566   void    do_ifnull(BoolTest::mask btest, Node* c);
567   void    do_if(BoolTest::mask btest, Node* c);
568   int     repush_if_args();
569   void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
570   void    sharpen_type_after_if(BoolTest::mask btest,
571                                 Node* con, const Type* tcon,
572                                 Node* val, const Type* tval);
573   void    maybe_add_predicate_after_if(Block* path);
574   IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
575   void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
576   void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
577   void    jump_if_always_fork(int dest_bci_if_true, bool unc);
578 
579   friend class SwitchRange;
580   void    do_tableswitch();
581   void    do_lookupswitch();
582   void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
583   bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
584   void    linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);
585 
586   // helper function for call statistics
587   void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;
588 
589   Node_Notes* make_node_notes(Node_Notes* caller_nn);

src/hotspot/share/opto/parse.hpp (new)

428   // Must this parse be aborted?
429   bool failing() const { return C->failing_internal(); } // might have cascading effects, not stressing bailouts for now.
430 
431   Block* rpo_at(int rpo) {
432     assert(0 <= rpo && rpo < _block_count, "oob");
433     return &_blocks[rpo];
434   }
435   Block* start_block() {
436     return rpo_at(flow()->start_block()->rpo());
437   }
438   // Can return null if the flow pass did not complete a block.
439   Block* successor_for_bci(int bci) {
440     return block()->successor_for_bci(bci);
441   }
442 
443  private:
444   // Create a JVMS & map for the initial state of this method.
445   SafePointNode* create_entry_map();
446 
447   // OSR helpers
448   Node* fetch_interpreter_state(int index, const Type* type, Node* local_addrs, Node* local_addrs_base);
449   Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
450   void  load_interpreter_state(Node* osr_buf);
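
For orientation: the OSR helpers copy the interpreter's frame into the compiler's JVM state, with check_interpreter_type guarding each slot and exiting through bad_type_exit on a type mismatch. A minimal sketch of the copying step only (the flat long[] layout is an assumption for illustration, not the real interpreter frame ABI):

#include <cstdio>

// Toy model of load_interpreter_state: OSR entry receives a buffer of
// interpreter locals (osr_buf) and republishes each slot as a value the
// compiled frame can use.
static void load_locals_model(const long* osr_buf, int max_locals,
                              long* jvms_locals) {
  for (int i = 0; i < max_locals; i++) {
    jvms_locals[i] = osr_buf[i];   // per-index fetch, as in fetch_interpreter_state
  }
}

int main() {
  long buf[3] = {7, 42, -1};
  long locals[3];
  load_locals_model(buf, 3, locals);
  printf("%ld %ld %ld\n", locals[0], locals[1], locals[2]); // 7 42 -1
}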
451 
452   // Functions for managing basic blocks:
453   void init_blocks();
454   void load_state_from(Block* b);
455   void store_state_to(Block* b) { b->record_state(this); }
456 
457   // Parse all the basic blocks.
458   void do_all_blocks();
459 
460   // Parse the current basic block
461   void do_one_block();
462 
463   // Raise an error if we get a bad ciTypeFlow CFG.
464   void handle_missing_successor(int bci);
465 
466   // first actions (before BCI 0)
467   void do_method_entry();
468 

474   void ensure_phis_everywhere();
475 
476   // Merge the current mapping into the basic block starting at bci
477   void merge(          int target_bci);
478   // Same as plain merge, except that it allocates a new path number.
479   void merge_new_path( int target_bci);
480   // Merge the current mapping into an exception handler.
481   void merge_exception(int target_bci);
482   // Helper: Merge the current mapping into the given basic block
483   void merge_common(Block* target, int pnum);
484   // Helper functions for merging individual cells.
485   PhiNode *ensure_phi(       int idx, bool nocreate = false);
486   PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
487   // Helper to merge the current memory state into the given basic block
488   void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
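
The merge family above is the heart of abstract-interpretation parsing: when a second path reaches a block, state cells that disagree across paths get phis, one input per path number (pnum). A minimal self-contained sketch of that bookkeeping (ToyPhi and the cell values are illustrative, not C2's PhiNode):

#include <cstdio>
#include <vector>

// Toy model of ensure_phi: one input slot per predecessor path (pnum).
struct ToyPhi {
  std::vector<int> in;                 // in[pnum] = cell value on that path
  void set_path(int pnum, int v) {
    if ((int)in.size() <= pnum) in.resize(pnum + 1);
    in[pnum] = v;
  }
};

int main() {
  ToyPhi local3;                       // local slot #3 at a merge point
  local3.set_path(0, 7);               // first path: merge(target_bci)
  local3.set_path(1, 42);              // later path: merge_new_path allocates pnum 1
  printf("inputs: %zu\n", local3.in.size()); // 2, one per predecessor
}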
489 
490   // Parse this bytecode, and alter the Parser's JVM->Node mapping
491   void do_one_bytecode();
492 
493   // helper function to generate array store check
494   Node* array_store_check(Node*& adr, const Type*& elemtype);
495   // Helper function to generate array load
496   void array_load(BasicType etype);
497   // Helper function to generate array store
498   void array_store(BasicType etype);
499   // Helper function to compute array addressing
500   Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
501   bool needs_range_check(const TypeInt* size_type, const Node* index) const;
502   Node* create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type, const Type*& element_type);
503   Node* cast_to_speculative_array_type(Node* array, const TypeAryPtr*& array_type, const Type*& element_type);
504   Node* cast_to_profiled_array_type(Node* const array);
505   Node* speculate_non_null_free_array(Node* array, const TypeAryPtr*& array_type);
506   Node* speculate_non_flat_array(Node* array, const TypeAryPtr* array_type);
507   void create_range_check(Node* idx, Node* ary, const TypeInt* sizetype);
508   Node* record_profile_for_speculation_at_array_load(Node* ld);
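
Of these, needs_range_check captures a classic elision: if the index's type range provably sits inside the array's size range, the bounds check folds away. A hedged sketch of the predicate's logic (Range and the bounds convention are stand-ins for C2's TypeInt):

#include <cstdio>

// Toy integer range standing in for C2's TypeInt; bounds are inclusive.
struct Range { int lo, hi; };

// A range check is redundant exactly when every possible index fits
// every possible array size: 0 <= index.lo and index.hi < size.lo.
static bool needs_range_check_model(Range size, Range index) {
  return !(index.lo >= 0 && index.hi < size.lo);
}

int main() {
  printf("%d\n", needs_range_check_model({10, 10}, {0, 9}));  // 0: provably in bounds
  printf("%d\n", needs_range_check_model({10, 20}, {0, 15})); // 1: may be out of bounds
}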
509 
510   void clinit_deopt();
511 
512   // Pass current map to exits
513   void return_current(Node* value);
514 
515   // Register finalizers on return from Object.<init>
516   void call_register_finalizer();
517 
518   // Insert a compiler safepoint into the graph
519   void add_safepoint();
520 
521   // Insert a compiler safepoint into the graph, if there is a back-branch.
522   void maybe_add_safepoint(int target_bci) {
523     if (target_bci <= bci()) {
524       add_safepoint();
525     }
526   }
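
The back-branch test reads tersely but is load-bearing: a branch target at or before the current bci can only be a loop edge (or a self-branch), and polling on every such edge bounds how long a thread can run without reaching a safepoint. A tiny self-contained illustration (the bci values are made up):

#include <cstdio>

// Why "target_bci <= bci()" finds loop edges: the only way bytecode
// control flow reaches an earlier (or the same) bci is a back-branch.
static bool is_back_branch(int target_bci, int branch_bci) {
  return target_bci <= branch_bci;
}

int main() {
  printf("%d\n", is_back_branch(4, 11));  // 1: backward goto, emit poll
  printf("%d\n", is_back_branch(20, 11)); // 0: forward branch, no poll
}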
527 
528   // Note:  Intrinsic generation routines may be found in library_call.cpp.

535 
536   // Helper functions for type checking bytecodes:
537   void  do_checkcast();
538   void  do_instanceof();
539 
540   // Helper functions for shifting & arithmetic
541   void modf();
542   void modd();
543   void l2f();
544 
545   // implementation of _get* and _put* bytecodes
546   void do_getstatic() { do_field_access(true,  false); }
547   void do_getfield () { do_field_access(true,  true); }
548   void do_putstatic() { do_field_access(false, false); }
549   void do_putfield () { do_field_access(false, true); }
550 
551   // common code for making initial checks and forming addresses
552   void do_field_access(bool is_get, bool is_field);
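
The four wrappers above make the factoring explicit: the bytecodes differ only along two axes, load vs. store and instance vs. static. A toy model of that two-flag funnel (field_access_model is hypothetical; the real do_field_access forms addresses and emits IR):

#include <cstdio>

// is_get picks load vs. store; is_field picks an instance access
// (receiver on the stack) vs. a static access (holder klass).
static void field_access_model(bool is_get, bool is_field) {
  printf("%s %s field\n",
         is_get   ? "load"     : "store",
         is_field ? "instance" : "static");
}

int main() {
  field_access_model(true,  false);  // getstatic
  field_access_model(true,  true);   // getfield
  field_access_model(false, false);  // putstatic
  field_access_model(false, true);   // putfield
}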
553 
554   // common code for actually performing the load or store
555   void do_get_xxx(Node* obj, ciField* field);
556   void do_put_xxx(Node* obj, ciField* field, bool is_field);
557   void set_inline_type_field(Node* obj, ciField* field, Node* val);
558 
559   ciType* improve_abstract_inline_type_klass(ciType* field_klass);
560 
561   // implementation of object creation bytecodes
562   void do_new();
563   void do_newarray(BasicType elemtype);
564   void do_newarray();
565   void do_multianewarray();
566   Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);
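
expand_multianewarray is naturally recursive: allocate the outermost dimension, then recurse once per element for the remaining dimensions. The sketch below only counts the innermost arrays rather than emitting allocation IR (count_leaf_arrays is illustrative):

#include <cstdio>

// Recursive shape of multi-dimensional array creation.
static int count_leaf_arrays(const int* lengths, int ndimensions) {
  if (ndimensions == 1) return 1;              // innermost arrays
  int n = 0;
  for (int i = 0; i < lengths[0]; i++) {
    n += count_leaf_arrays(lengths + 1, ndimensions - 1);
  }
  return n;
}

int main() {
  int dims[] = {3, 4, 5};                      // new int[3][4][5]
  printf("%d\n", count_leaf_arrays(dims, 3));  // 12 innermost int[5] arrays
}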
567 
568   // implementation of jsr/ret
569   void do_jsr();
570   void do_ret();
571 
572   float   dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
573   float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
574   bool    seems_never_taken(float prob) const;
575   bool    path_is_suitable_for_uncommon_trap(float prob) const;
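
branch_prediction distills profile counts into a float probability, and seems_never_taken / path_is_suitable_for_uncommon_trap act on its extremes. A hedged model of the ratio-with-clamp idea (the clamp constant here is made up; C2 has its own PROB_MIN/PROB_MAX limits):

#include <cstdio>

// Profile-driven probability as taken/total, clamped away from 0 and 1.
static float branch_prob_model(int taken, int not_taken) {
  const float kMin = 1e-6f;              // illustrative clamp, not C2's
  float total = (float)(taken + not_taken);
  if (total == 0.0f) return 0.5f;        // no profile data: assume 50/50
  float p = (float)taken / total;
  if (p < kMin)        p = kMin;         // "seems never taken"
  if (p > 1.0f - kMin) p = 1.0f - kMin;  // "seems always taken"
  return p;
}

int main() {
  printf("%g\n", branch_prob_model(0, 10000)); // ~0: uncommon-trap candidate
  printf("%g\n", branch_prob_model(50, 50));   // 0.5
}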
576 
577   void    do_ifnull(BoolTest::mask btest, Node* c);
578   void    do_if(BoolTest::mask btest, Node* c, bool can_trap = true, bool new_path = false, Node** ctrl_taken = nullptr);
579   void    do_acmp(BoolTest::mask btest, Node* left, Node* right);
580   void    acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region);
581   void    acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region);
582   Node*   acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl);
583   void    acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region);
584   int     repush_if_args();
585   void    adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap = true);
586   void    sharpen_type_after_if(BoolTest::mask btest,
587                                 Node* con, const Type* tcon,
588                                 Node* val, const Type* tval);
589   void    maybe_add_predicate_after_if(Block* path);
590   IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
591   void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
592   void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
593   void    jump_if_always_fork(int dest_bci_if_true, bool unc);
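
The do_acmp/acmp_* additions above exist because Valhalla's value objects break the old pointer-compare reading of acmp: two distinct references can still be "the same" value object, so pointer equality survives only as a fast path. A toy model of the split (Obj, acmp_model, and single-field substitutability are illustrative only):

#include <cstdio>

struct Obj { bool is_value; int field; };

static bool acmp_model(const Obj* a, const Obj* b) {
  if (a == b) return true;                        // fast path: same reference
  if (a == nullptr || b == nullptr) return false;
  if (!a->is_value || !b->is_value) return false; // identity objects: unequal
  return a->field == b->field;                    // substitutability (toy)
}

int main() {
  Obj x{true, 5}, y{true, 5}, z{false, 5};
  printf("%d %d\n", acmp_model(&x, &y), acmp_model(&z, &x)); // 1 0
}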
594 
595   friend class SwitchRange;
596   void    do_tableswitch();
597   void    do_lookupswitch();
598   void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
599   bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
600   void    linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);
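
jump_switch_ranges binary-searches sorted, disjoint case ranges, emitting one compare per probe, while create_jump_tables swaps dense spans for a table. A self-contained model of the lookup the emitted code performs (RangeCase and switch_dest are illustrative; the real code emits IR, not a loop):

#include <cstdio>

struct RangeCase { int lo, hi, dest; };  // disjoint, sorted [lo, hi] -> dest

static int switch_dest(const RangeCase* r, int n, int key, int default_dest) {
  int lo = 0, hi = n - 1;
  while (lo <= hi) {
    int mid = lo + (hi - lo) / 2;
    if      (key < r[mid].lo) hi = mid - 1;
    else if (key > r[mid].hi) lo = mid + 1;
    else return r[mid].dest;
  }
  return default_dest;
}

int main() {
  RangeCase rs[] = {{0, 3, 100}, {10, 10, 200}, {20, 29, 300}};
  printf("%d\n", switch_dest(rs, 3, 25, -1)); // 300
  printf("%d\n", switch_dest(rs, 3, 5,  -1)); // -1: default
}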
601 
602   // helper function for call statistics
603   void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;
604 
605   Node_Notes* make_node_notes(Node_Notes* caller_nn);