  // Must this parse be aborted?
  bool failing() { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void load_interpreter_state(Node* osr_buf);
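  // (Roughly: osr_buf is the buffer of interpreter state handed over at an on-stack
  //  replacement entry; load_interpreter_state rebuilds the JVM state from it, using
  //  fetch_interpreter_state for each slot and check_interpreter_type to branch to
  //  bad_type_exit when a value does not match the type ciTypeFlow expected.)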

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(          int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi(       int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
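  // (How merging works, in outline: each block that is a join point owns a RegionNode;
  //  merge_common wires the incoming control in as input 'pnum' of that region, and for
  //  every local, stack slot, and memory slice whose value can differ across paths,
  //  ensure_phi / ensure_memory_phi create or extend a PhiNode so the merged state stays
  //  in SSA form.  merge_new_path is used when the target needs a fresh path number.)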

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // Helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type*& elemtype);

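  // Class-initialization barrier: deoptimize at method entry if the holder class
  // has not yet been initialized.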
  void clinit_deopt();

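  // RTM (Restricted Transactional Memory) locking support: deoptimize so the method
  // can be recompiled once profiling has settled on an RTM locking mode.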
  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
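  // (A branch whose target bci is at or before the current bci is a back edge; putting
  //  a safepoint on such edges is what lets long-running loops be stopped for GC and
  //  deoptimization, while forward branches need nothing extra.)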
  void maybe_add_safepoint(int target_bci) {
    if (target_bci <= bci()) {
      add_safepoint();
    }
  }

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true, false); }
  void do_getfield () { do_field_access(true, true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }
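  // (The two flags map onto the four bytecodes above: the first argument is is_get,
  //  i.e. load vs. store, and the second is is_field, i.e. instance field vs. static.)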

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

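  // (Branch profiling: these consult the interpreter's profile data, when available, to
  //  estimate how likely and how frequently a branch is taken; seems_never_taken and
  //  path_is_suitable_for_uncommon_trap then decide whether the cold path can be replaced
  //  by an uncommon trap instead of being parsed.)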
  float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
  float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool seems_never_taken(float prob) const;
  bool path_is_suitable_for_uncommon_trap(float prob) const;
  bool seems_stable_comparison() const;

  void do_ifnull(BoolTest::mask btest, Node* c);
  void do_if(BoolTest::mask btest, Node* c);
  int repush_if_args();
  void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                           Block* path, Block* other_path);
  void sharpen_type_after_if(BoolTest::mask btest,
                             Node* con, const Type* tcon,
                             Node* val, const Type* tval);
  void maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
  void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
  void jump_if_always_fork(int dest_bci_if_true, bool unc);

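  // (Switch lowering, roughly: the case labels are collected into SwitchRange spans;
  //  dense spans can become jump tables via create_jump_tables, a handful of ranges is
  //  handled by a linear chain of compares, and anything larger is split recursively by
  //  jump_switch_ranges into a binary search over the ranges.)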
  friend class SwitchRange;
  void do_tableswitch();
  void do_lookupswitch();
  void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

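  // Presumably part of code aging: emit a decrement of the method's age counter so that
  // code which has gone cold can eventually be recompiled with fresh profile data.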
  void decrement_age();

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;
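
// ---------------------------------------------------------------------------
// The same stretch of declarations again, in what appears to be a later revision
// of the file that adds inline-type (value object) support; note the new
// aconst_init/withfield and acmp helpers and the handful of changed signatures.
// ---------------------------------------------------------------------------
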
  // Must this parse be aborted?
  bool failing() { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, const Type* type, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(          int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi(       int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // Helper function to generate array store check
  Node* array_store_check(Node*& adr, const Type*& elemtype);
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
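  // Presumably uses the array-access profile to attach a speculative type to the value
  // just loaded from the array, so later checks can be strengthened or removed.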
  Node* record_profile_for_speculation_at_array_load(Node* ld);

  void clinit_deopt();

  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (target_bci <= bci()) {
      add_safepoint();
    }
  }

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true, false); }
  void do_getfield () { do_field_access(true, true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void do_new();
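  // (aconst_init and withfield are the inline-class creation bytecodes: aconst_init
  //  pushes the all-default instance of an inline class, and withfield produces a copy
  //  of an instance with a single field replaced.)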
  void do_aconst_init();
  void do_withfield();
  void do_newarray(BasicType elemtype);
  void do_newarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
  float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool seems_never_taken(float prob) const;
  bool path_is_suitable_for_uncommon_trap(float prob) const;
  bool seems_stable_comparison() const;

  void do_ifnull(BoolTest::mask btest, Node* c);
  void do_if(BoolTest::mask btest, Node* c, bool new_path = false, Node** ctrl_taken = NULL);
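  // (With inline types, acmpeq/acmpne can no longer always be a raw pointer compare:
  //  do_acmp and the acmp_* helpers below use the profiled pointer kind and any static
  //  type information to fall back to a simple pointer comparison when one input is
  //  known to be null or known not to be an inline type, and only otherwise emit the
  //  full substitutability test.)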
  void do_acmp(BoolTest::mask btest, Node* left, Node* right);
  void acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region);
  void acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region);
  Node* acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl);
  void acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region);
  int repush_if_args();
  void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
  void sharpen_type_after_if(BoolTest::mask btest,
                             Node* con, const Type* tcon,
                             Node* val, const Type* tval);
  void maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
  void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
  void jump_if_always_fork(int dest_bci_if_true, bool unc);

  friend class SwitchRange;
  void do_tableswitch();
  void do_lookupswitch();
  void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

  void decrement_age();

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;