428 // Must this parse be aborted?
429 bool failing() { return C->failing(); }
430
431 Block* rpo_at(int rpo) {  // Block at the given reverse-post-order index (bounds asserted).
432 assert(0 <= rpo && rpo < _block_count, "oob");
433 return &_blocks[rpo];
434 }
435 Block* start_block() {  // Block for the method entry, located via the type-flow pass's rpo index.
436 return rpo_at(flow()->start_block()->rpo());
437 }
438 // Can return null if the flow pass did not complete a block.
439 Block* successor_for_bci(int bci) {  // Delegates to the current block's successor lookup.
440 return block()->successor_for_bci(bci);
441 }
442
443 private:
444 // Create a JVMS & map for the initial state of this method.
445 SafePointNode* create_entry_map();
446
447 // OSR helpers
448 Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);  // presumably loads one interpreter frame slot from the OSR buffer — confirm against definition
449 Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);  // bad_type_exit collects the path taken when the incoming value fails the type check
450 void load_interpreter_state(Node* osr_buf);
451
452 // Functions for managing basic blocks:
453 void init_blocks();
454 void load_state_from(Block* b);
455 void store_state_to(Block* b) { b->record_state(this); }  // Snapshot the parser's current state into b.
456
457 // Parse all the basic blocks.
458 void do_all_blocks();
459
460 // Parse the current basic block
461 void do_one_block();
462
463 // Raise an error if we get a bad ciTypeFlow CFG.
464 void handle_missing_successor(int bci);
465
466 // first actions (before BCI 0)
467 void do_method_entry();
474 void ensure_phis_everywhere();  // NOTE(review): the header comment for this declaration (original lines 469-473) is missing from this excerpt.
475
476 // Merge the current mapping into the basic block starting at bci
477 void merge( int target_bci);
478 // Same as plain merge, except that it allocates a new path number.
479 void merge_new_path( int target_bci);
480 // Merge the current mapping into an exception handler.
481 void merge_exception(int target_bci);
482 // Helper: Merge the current mapping into the given basic block
483 void merge_common(Block* target, int pnum);
484 // Helper functions for merging individual cells.
485 PhiNode *ensure_phi( int idx, bool nocreate = false);
486 PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
487 // Helper to merge the current memory state into the given basic block
488 void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
489
490 // Parse this bytecode, and alter the Parser's JVM->Node mapping
491 void do_one_bytecode();
492
493 // helper function to generate array store check
494 void array_store_check();
495 // Helper function to generate array load
496 void array_load(BasicType etype);
497 // Helper function to generate array store
498 void array_store(BasicType etype);
499 // Helper function to compute array addressing
500 Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
501
502 void clinit_deopt();  // presumably deoptimizes when the holder class is not yet initialized — confirm against definition
503
504 void rtm_deopt();  // presumably RTM (restricted transactional memory) related deopt — confirm against definition
505
506 // Pass current map to exits
507 void return_current(Node* value);
508
509 // Register finalizers on return from Object.<init>
510 void call_register_finalizer();
511
512 // Insert a compiler safepoint into the graph
513 void add_safepoint();
514
515 // Insert a compiler safepoint into the graph, if there is a back-branch.
516 void maybe_add_safepoint(int target_bci) {
517 if (target_bci <= bci()) {  // Backward (or self) branch: poll for a safepoint on the loop edge.
518 add_safepoint();
519 }
520 }
529
530 // Helper functions for type checking bytecodes:
531 void do_checkcast();
532 void do_instanceof();
533
534 // Helper functions for shifting & arithmetic
535 void modf();
536 void modd();
537 void l2f();
538
539 // implementation of _get* and _put* bytecodes
540 void do_getstatic() { do_field_access(true, false); }  // is_get=true,  is_field=false
541 void do_getfield () { do_field_access(true, true); }  // is_get=true,  is_field=true
542 void do_putstatic() { do_field_access(false, false); }  // is_get=false, is_field=false
543 void do_putfield () { do_field_access(false, true); }  // is_get=false, is_field=true
544
545 // common code for making initial checks and forming addresses
546 void do_field_access(bool is_get, bool is_field);
547
548 // common code for actually performing the load or store
549 void do_get_xxx(Node* obj, ciField* field, bool is_field);
550 void do_put_xxx(Node* obj, ciField* field, bool is_field);
551
552 // implementation of object creation bytecodes
553 void do_new();
554 void do_newarray(BasicType elemtype);
555 void do_anewarray();
556 void do_multianewarray();
557 Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);
558
559 // implementation of jsr/ret
560 void do_jsr();
561 void do_ret();
562
563 float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
564 float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
565 bool seems_never_taken(float prob) const;
566 bool path_is_suitable_for_uncommon_trap(float prob) const;
567
568 void do_ifnull(BoolTest::mask btest, Node* c);
569 void do_if(BoolTest::mask btest, Node* c);
570 int repush_if_args();
571 void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
572 void sharpen_type_after_if(BoolTest::mask btest,
573 Node* con, const Type* tcon,
574 Node* val, const Type* tval);
575 void maybe_add_predicate_after_if(Block* path);
576 IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
577 void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
578 void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
579 void jump_if_always_fork(int dest_bci_if_true, bool unc);
580
581 friend class SwitchRange;
582 void do_tableswitch();
583 void do_lookupswitch();
584 void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
585 bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
586 void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);
587
588 // helper function for call statistics
589 void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;
590
591 Node_Notes* make_node_notes(Node_Notes* caller_nn);
|
428 // Must this parse be aborted?
429 bool failing() { return C->failing(); }
430
431 Block* rpo_at(int rpo) {  // Block at the given reverse-post-order index (bounds asserted).
432 assert(0 <= rpo && rpo < _block_count, "oob");
433 return &_blocks[rpo];
434 }
435 Block* start_block() {  // Block for the method entry, located via the type-flow pass's rpo index.
436 return rpo_at(flow()->start_block()->rpo());
437 }
438 // Can return null if the flow pass did not complete a block.
439 Block* successor_for_bci(int bci) {  // Delegates to the current block's successor lookup.
440 return block()->successor_for_bci(bci);
441 }
442
443 private:
444 // Create a JVMS & map for the initial state of this method.
445 SafePointNode* create_entry_map();
446
447 // OSR helpers
448 Node* fetch_interpreter_state(int index, const Type* type, Node* local_addrs, Node* local_addrs_base);  // presumably loads one interpreter frame slot from the OSR buffer — confirm against definition
449 Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);  // bad_type_exit collects the path taken when the incoming value fails the type check
450 void load_interpreter_state(Node* osr_buf);
451
452 // Functions for managing basic blocks:
453 void init_blocks();
454 void load_state_from(Block* b);
455 void store_state_to(Block* b) { b->record_state(this); }  // Snapshot the parser's current state into b.
456
457 // Parse all the basic blocks.
458 void do_all_blocks();
459
460 // Parse the current basic block
461 void do_one_block();
462
463 // Raise an error if we get a bad ciTypeFlow CFG.
464 void handle_missing_successor(int bci);
465
466 // first actions (before BCI 0)
467 void do_method_entry();
468
474 void ensure_phis_everywhere();  // NOTE(review): the header comment for this declaration (original lines 469-473) is missing from this excerpt.
475
476 // Merge the current mapping into the basic block starting at bci
477 void merge( int target_bci);
478 // Same as plain merge, except that it allocates a new path number.
479 void merge_new_path( int target_bci);
480 // Merge the current mapping into an exception handler.
481 void merge_exception(int target_bci);
482 // Helper: Merge the current mapping into the given basic block
483 void merge_common(Block* target, int pnum);
484 // Helper functions for merging individual cells.
485 PhiNode *ensure_phi( int idx, bool nocreate = false);
486 PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
487 // Helper to merge the current memory state into the given basic block
488 void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
489
490 // Parse this bytecode, and alter the Parser's JVM->Node mapping
491 void do_one_bytecode();
492
493 // helper function to generate array store check
494 Node* array_store_check(Node*& adr, const Type*& elemtype);
495 // Helper function to generate array load
496 void array_load(BasicType etype);
497 // Helper function to generate array store
498 void array_store(BasicType etype);
499 // Helper function to compute array addressing
500 Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
501 bool needs_range_check(const TypeInt* size_type, const Node* index) const;  // presumably true unless the index is provably within [0, size) — confirm against definition
502 Node* create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type, const Type*& element_type);
503 Node* cast_to_speculative_array_type(Node* array, const TypeAryPtr*& array_type, const Type*& element_type);
504 Node* cast_to_profiled_array_type(Node* const array);
505 Node* speculate_non_null_free_array(Node* array, const TypeAryPtr*& array_type);
506 Node* speculate_non_flat_array(Node* array, const TypeAryPtr* array_type);
507 void create_range_check(Node* idx, Node* ary, const TypeInt* sizetype);
508 Node* record_profile_for_speculation_at_array_load(Node* ld);
509
510 void clinit_deopt();  // presumably deoptimizes when the holder class is not yet initialized — confirm against definition
511
512 void rtm_deopt();  // presumably RTM (restricted transactional memory) related deopt — confirm against definition
513
514 // Pass current map to exits
515 void return_current(Node* value);
516
517 // Register finalizers on return from Object.<init>
518 void call_register_finalizer();
519
520 // Insert a compiler safepoint into the graph
521 void add_safepoint();
522
523 // Insert a compiler safepoint into the graph, if there is a back-branch.
524 void maybe_add_safepoint(int target_bci) {
525 if (target_bci <= bci()) {  // Backward (or self) branch: poll for a safepoint on the loop edge.
526 add_safepoint();
527 }
528 }
537
538 // Helper functions for type checking bytecodes:
539 void do_checkcast();
540 void do_instanceof();
541
542 // Helper functions for shifting & arithmetic
543 void modf();
544 void modd();
545 void l2f();
546
547 // implementation of _get* and _put* bytecodes
548 void do_getstatic() { do_field_access(true, false); }  // is_get=true,  is_field=false
549 void do_getfield () { do_field_access(true, true); }  // is_get=true,  is_field=true
550 void do_putstatic() { do_field_access(false, false); }  // is_get=false, is_field=false
551 void do_putfield () { do_field_access(false, true); }  // is_get=false, is_field=true
552
553 // common code for making initial checks and forming addresses
554 void do_field_access(bool is_get, bool is_field);
555
556 // common code for actually performing the load or store
557 void do_get_xxx(Node* obj, ciField* field);
558 void do_put_xxx(Node* obj, ciField* field, bool is_field);
559
560 ciType* improve_abstract_inline_type_klass(ciType* field_klass);
561
562 // implementation of object creation bytecodes
563 void do_new();
564 void do_newarray(BasicType elemtype);
565 void do_newarray();  // overload: no-argument form (reference-array case), distinct from do_newarray(BasicType)
566 void do_multianewarray();
567 Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);
568
569 // implementation of jsr/ret
570 void do_jsr();
571 void do_ret();
572
573 float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
574 float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
575 bool seems_never_taken(float prob) const;
576 bool path_is_suitable_for_uncommon_trap(float prob) const;
577
578 void do_ifnull(BoolTest::mask btest, Node* c);
579 void do_if(BoolTest::mask btest, Node* c, bool can_trap = true, bool new_path = false, Node** ctrl_taken = nullptr);
580 void do_acmp(BoolTest::mask btest, Node* left, Node* right);
581 void acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region);
582 void acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region);
583 Node* acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl);
584 void acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region);
585 int repush_if_args();
586 void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap = true);
587 void sharpen_type_after_if(BoolTest::mask btest,
588 Node* con, const Type* tcon,
589 Node* val, const Type* tval);
590 void maybe_add_predicate_after_if(Block* path);
591 IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
592 void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
593 void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
594 void jump_if_always_fork(int dest_bci_if_true, bool unc);
595
596 friend class SwitchRange;
597 void do_tableswitch();
598 void do_lookupswitch();
599 void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
600 bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
601 void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);
602
603 // helper function for call statistics
604 void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;
605
606 Node_Notes* make_node_notes(Node_Notes* caller_nn);
|