334 BytecodeParseHistogram* parse_histogram() { return _parse_histogram; }
335 #endif
336
337 private:
338 friend class Block;
339
340 // Variables which characterize this compilation as a whole:
341
342 JVMState* _caller; // JVMS which carries incoming args & state.
343 float _expected_uses; // expected number of calls to this code
344 float _prof_factor; // discount applied to my profile counts
345 int _depth; // Inline tree depth, for debug printouts
346 const TypeFunc*_tf; // My kind of function type
347 int _entry_bci; // the osr bci or InvocationEntryBci
348
349 ciTypeFlow* _flow; // Results of previous flow pass.
350 Block* _blocks; // Array of basic-block structs.
351 int _block_count; // Number of elements in _blocks.
352
353 GraphKit _exits; // Record all normal returns and throws here.
354 bool _wrote_final; // Did we write a final field?
355 bool _wrote_volatile; // Did we write a volatile field?
356 bool _wrote_stable; // Did we write a @Stable field?
357 bool _wrote_fields; // Did we write any field?
358 Node* _alloc_with_final_or_stable; // An allocation node with final or @Stable field
359 Node* _stress_rf_hook; // StressReachabilityFences support
360
361 // Variables which track Java semantics during bytecode parsing:
362
363 Block* _block; // block currently getting parsed
364 ciBytecodeStream _iter; // stream of this method's bytecodes
365
366 const FastLockNode* _synch_lock; // FastLockNode for synchronized method
367
368 #ifndef PRODUCT
369 int _max_switch_depth; // Debugging SwitchRanges.
370 int _est_switch_depth; // Debugging SwitchRanges.
371 #endif
372
373 bool _first_return; // true if return is the first to be parsed
374 bool _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
376
377 public:
378 // Constructor
379 Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
380
381 virtual Parse* is_Parse() const { return (Parse*)this; }
382
383 // Accessors.
384 JVMState* caller() const { return _caller; }
385 float expected_uses() const { return _expected_uses; }
386 float prof_factor() const { return _prof_factor; }
387 int depth() const { return _depth; }
388 const TypeFunc* tf() const { return _tf; }
389 // entry_bci() -- see osr_bci, etc.
390
391 ciTypeFlow* flow() const { return _flow; }
392 // blocks() -- see rpo_at, start_block, etc.
393 int block_count() const { return _block_count; }
394
395 GraphKit& exits() { return _exits; }
396 bool wrote_final() const { return _wrote_final; }
397 void set_wrote_final(bool z) { _wrote_final = z; }
398 bool wrote_volatile() const { return _wrote_volatile; }
399 void set_wrote_volatile(bool z) { _wrote_volatile = z; }
400 bool wrote_stable() const { return _wrote_stable; }
401 void set_wrote_stable(bool z) { _wrote_stable = z; }
402 bool wrote_fields() const { return _wrote_fields; }
403 void set_wrote_fields(bool z) { _wrote_fields = z; }
404 Node* alloc_with_final_or_stable() const { return _alloc_with_final_or_stable; }
405 void set_alloc_with_final_or_stable(Node* n) {
// Only a single allocation may be tracked at a time; re-setting to a
// different node would mean two distinct init objects, which the assert rejects.
406 assert((_alloc_with_final_or_stable == nullptr) || (_alloc_with_final_or_stable == n), "different init objects?");
407 _alloc_with_final_or_stable = n;
408 }
409
410 Block* block() const { return _block; }
411 ciBytecodeStream& iter() { return _iter; }
412 Bytecodes::Code bc() const { return _iter.cur_bc(); }
413
414 void set_block(Block* b) { _block = b; }
415
416 // Derived accessors:
417 bool is_osr_parse() const {
426 // Must this parse be aborted?
427 bool failing() const { return C->failing_internal(); } // might have cascading effects, not stressing bailouts for now.
428
429 Block* rpo_at(int rpo) {
// Index into the _blocks array by rpo (reverse-post-order) number, bounds-checked.
430 assert(0 <= rpo && rpo < _block_count, "oob");
431 return &_blocks[rpo];
432 }
433 Block* start_block() {
// Map ciTypeFlow's start block to the parser's Block array via its rpo index.
434 return rpo_at(flow()->start_block()->rpo());
435 }
436 // Can return null if the flow pass did not complete a block.
437 Block* successor_for_bci(int bci) {
// Delegates to the current block; can return null if the flow pass
// did not complete the successor block.
438 return block()->successor_for_bci(bci);
439 }
440
441 private:
442 // Create a JVMS & map for the initial state of this method.
443 SafePointNode* create_entry_map();
444
445 // OSR helpers
446 Node *fetch_interpreter_state(int index, BasicType bt, Node* local_addrs);
447 Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
448 void load_interpreter_state(Node* osr_buf);
449
450 // Functions for managing basic blocks:
451 void init_blocks();
452 void load_state_from(Block* b);
453 void store_state_to(Block* b) { b->record_state(this); }
454
455 // Parse all the basic blocks.
456 void do_all_blocks();
457
458 // Parse the current basic block
459 void do_one_block();
460
461 // Raise an error if we get a bad ciTypeFlow CFG.
462 void handle_missing_successor(int bci);
463
464 // first actions (before BCI 0)
465 void do_method_entry();
466
467 // implementation of monitorenter/monitorexit
468 void do_monitor_enter();
469 void do_monitor_exit();
470
471 // Eagerly create phis throughout the state, to cope with back edges.
472 void ensure_phis_everywhere();
473
474 // Merge the current mapping into the basic block starting at bci
475 void merge( int target_bci);
476 // Same as plain merge, except that it allocates a new path number.
477 void merge_new_path( int target_bci);
478 // Push the exception oop and merge the current mapping into an exception handler.
479 void push_and_merge_exception(int target_bci, Node* ex_oop);
480 // Helper: Merge the current mapping into the given basic block
481 void merge_common(Block* target, int pnum);
482 // Helper functions for merging individual cells.
483 PhiNode *ensure_phi( int idx, bool nocreate = false);
484 PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
485 // Helper to merge the current memory state into the given basic block
486 void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
487
488 // Parse this bytecode, and alter the Parser's JVM->Node mapping
489 void do_one_bytecode();
490
491 // helper function to generate array store check
492 void array_store_check();
493 // Helper function to generate array load
494 void array_load(BasicType etype);
495 // Helper function to generate array store
496 void array_store(BasicType etype);
497 // Helper function to compute array addressing
498 Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
499
500 void clinit_deopt();
501
502 // Pass current map to exits
503 void return_current(Node* value);
504
505 // Register finalizers on return from Object.<init>
506 void call_register_finalizer();
507
508 // Insert a compiler safepoint into the graph
509 void add_safepoint();
510
511 // Insert a compiler safepoint into the graph, if there is a back-branch.
512 void maybe_add_safepoint(int target_bci) {
// A branch target at or before the current bci is a back-branch and may
// close a loop, so a safepoint is inserted there.
513 if (target_bci <= bci()) {
514 add_safepoint();
515 }
516 }
517
518 // Note: Intrinsic generation routines may be found in library_call.cpp.
524 bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);
525
526 // Helper functions for type checking bytecodes:
527 void do_checkcast();
528 void do_instanceof();
529
530 // Helper functions for shifting & arithmetic
531 Node* floating_point_mod(Node* a, Node* b, BasicType type);
532 void l2f();
533
534 // implementation of _get* and _put* bytecodes
535 void do_getstatic() { do_field_access(true, false); }
536 void do_getfield () { do_field_access(true, true); }
537 void do_putstatic() { do_field_access(false, false); }
538 void do_putfield () { do_field_access(false, true); }
539
540 // common code for making initial checks and forming addresses
541 void do_field_access(bool is_get, bool is_field);
542
543 // common code for actually performing the load or store
544 void do_get_xxx(Node* obj, ciField* field, bool is_field);
545 void do_put_xxx(Node* obj, ciField* field, bool is_field);
546
547 // implementation of object creation bytecodes
548 void do_new();
549 void do_newarray(BasicType elemtype);
550 void do_anewarray();
551 void do_multianewarray();
552 Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);
553
554 // implementation of jsr/ret
555 void do_jsr();
556 void do_ret();
557
558 float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
559 float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
560 bool seems_never_taken(float prob) const;
561 bool path_is_suitable_for_uncommon_trap(float prob) const;
562
563 void do_ifnull(BoolTest::mask btest, Node* c);
564 void do_if(BoolTest::mask btest, Node* c);
565 int repush_if_args();
566 void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path);
567 void sharpen_type_after_if(BoolTest::mask btest,
568 Node* con, const Type* tcon,
569 Node* val, const Type* tval);
570 void maybe_add_predicate_after_if(Block* path);
571 IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
572 void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
573 void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
574 void jump_if_always_fork(int dest_bci_if_true, bool unc);
575
576 friend class SwitchRange;
577 void do_tableswitch();
578 void do_lookupswitch();
579 void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
580 bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
581 void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);
582
583 // helper function for call statistics
584 void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;
585
586 Node_Notes* make_node_notes(Node_Notes* caller_nn);
|
334 BytecodeParseHistogram* parse_histogram() { return _parse_histogram; }
335 #endif
336
337 private:
338 friend class Block;
339
340 // Variables which characterize this compilation as a whole:
341
342 JVMState* _caller; // JVMS which carries incoming args & state.
343 float _expected_uses; // expected number of calls to this code
344 float _prof_factor; // discount applied to my profile counts
345 int _depth; // Inline tree depth, for debug printouts
346 const TypeFunc*_tf; // My kind of function type
347 int _entry_bci; // the osr bci or InvocationEntryBci
348
349 ciTypeFlow* _flow; // Results of previous flow pass.
350 Block* _blocks; // Array of basic-block structs.
351 int _block_count; // Number of elements in _blocks.
352
353 GraphKit _exits; // Record all normal returns and throws here.
354 bool _wrote_non_strict_final; // Did we write a non-strict final field?
355 bool _wrote_volatile; // Did we write a volatile field?
356 bool _wrote_stable; // Did we write a @Stable field?
357 bool _wrote_fields; // Did we write any field?
358 Node* _alloc_with_final_or_stable; // An allocation node with final or @Stable field
359 Node* _stress_rf_hook; // StressReachabilityFences support
360
361 // Variables which track Java semantics during bytecode parsing:
362
363 Block* _block; // block currently getting parsed
364 ciBytecodeStream _iter; // stream of this method's bytecodes
365
366 const FastLockNode* _synch_lock; // FastLockNode for synchronized method
367
368 #ifndef PRODUCT
369 int _max_switch_depth; // Debugging SwitchRanges.
370 int _est_switch_depth; // Debugging SwitchRanges.
371 #endif
372
373 bool _first_return; // true if return is the first to be parsed
374 bool _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
376
377 public:
378 // Constructor
379 Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
380
381 virtual Parse* is_Parse() const { return (Parse*)this; }
382
383 // Accessors.
384 JVMState* caller() const { return _caller; }
385 float expected_uses() const { return _expected_uses; }
386 float prof_factor() const { return _prof_factor; }
387 int depth() const { return _depth; }
388 const TypeFunc* tf() const { return _tf; }
389 // entry_bci() -- see osr_bci, etc.
390
391 ciTypeFlow* flow() const { return _flow; }
392 // blocks() -- see rpo_at, start_block, etc.
393 int block_count() const { return _block_count; }
394
395 GraphKit& exits() { return _exits; }
396 bool wrote_non_strict_final() const { return _wrote_non_strict_final; }
397 void set_wrote_non_strict_final(bool z) { _wrote_non_strict_final = z; }
398 bool wrote_volatile() const { return _wrote_volatile; }
399 void set_wrote_volatile(bool z) { _wrote_volatile = z; }
400 bool wrote_stable() const { return _wrote_stable; }
401 void set_wrote_stable(bool z) { _wrote_stable = z; }
402 bool wrote_fields() const { return _wrote_fields; }
403 void set_wrote_fields(bool z) { _wrote_fields = z; }
404 Node* alloc_with_final_or_stable() const { return _alloc_with_final_or_stable; }
405 void set_alloc_with_final_or_stable(Node* n) {
// Only a single allocation may be tracked at a time; re-setting to a
// different node would mean two distinct init objects, which the assert rejects.
406 assert((_alloc_with_final_or_stable == nullptr) || (_alloc_with_final_or_stable == n), "different init objects?");
407 _alloc_with_final_or_stable = n;
408 }
409
410 Block* block() const { return _block; }
411 ciBytecodeStream& iter() { return _iter; }
412 Bytecodes::Code bc() const { return _iter.cur_bc(); }
413
414 void set_block(Block* b) { _block = b; }
415
416 // Derived accessors:
417 bool is_osr_parse() const {
426 // Must this parse be aborted?
427 bool failing() const { return C->failing_internal(); } // might have cascading effects, not stressing bailouts for now.
428
429 Block* rpo_at(int rpo) {
// Index into the _blocks array by rpo (reverse-post-order) number, bounds-checked.
430 assert(0 <= rpo && rpo < _block_count, "oob");
431 return &_blocks[rpo];
432 }
433 Block* start_block() {
// Map ciTypeFlow's start block to the parser's Block array via its rpo index.
434 return rpo_at(flow()->start_block()->rpo());
435 }
436 // Can return null if the flow pass did not complete a block.
437 Block* successor_for_bci(int bci) {
// Delegates to the current block; can return null if the flow pass
// did not complete the successor block.
438 return block()->successor_for_bci(bci);
439 }
440
441 private:
442 // Create a JVMS & map for the initial state of this method.
443 SafePointNode* create_entry_map();
444
445 // OSR helpers
446 Node* fetch_interpreter_state(int index, const Type* type, Node* local_addrs);
447 Node* check_interpreter_type(Node* l, ciType* ci_type, SafePointNode* &bad_type_exit);
448 void load_interpreter_state(Node* osr_buf);
449
450 // Functions for managing basic blocks:
451 void init_blocks();
452 void load_state_from(Block* b);
453 void store_state_to(Block* b) { b->record_state(this); }
454
455 // Parse all the basic blocks.
456 void do_all_blocks();
457
458 // Parse the current basic block
459 void do_one_block();
460
461 // Raise an error if we get a bad ciTypeFlow CFG.
462 void handle_missing_successor(int bci);
463
464 // first actions (before BCI 0)
465 void do_method_entry();
466
467 // implementation of monitorenter/monitorexit
468 void do_monitor_enter();
469 void do_monitor_exit();
470
471 // Eagerly create phis throughout the state, to cope with back edges.
472 void ensure_phis_everywhere();
473
474 // Merge the current mapping into the basic block starting at bci
475 void merge( int target_bci);
476 // Same as plain merge, except that it allocates a new path number.
477 void merge_new_path( int target_bci);
478 // Push the exception oop and merge the current mapping into an exception handler.
479 void push_and_merge_exception(int target_bci, Node* ex_oop);
480 // Helper: Merge the current mapping into the given basic block
481 void merge_common(Block* target, int pnum);
482 // Helper functions for merging individual cells.
483 Node* ensure_phi( int idx, bool nocreate = false);
484 PhiNode* ensure_memory_phi(int idx, bool nocreate = false);
485 // Helper to merge the current memory state into the given basic block
486 void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
487
488 // Parse this bytecode, and alter the Parser's JVM->Node mapping
489 void do_one_bytecode();
490
491 // helper function to generate array store check
492 Node* array_store_check(Node*& adr, const Type*& elemtype);
493 // Helper function to generate array load
494 void array_load(BasicType etype);
495 Node* load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr);
496 // Helper function to generate array store
497 void array_store(BasicType etype);
498 void store_to_unknown_flat_array(Node* array, Node* idx, Node* non_null_stored_value);
499 // Helper function to compute array addressing
500 Node* array_addressing(BasicType type, int vals, const Type*& elemtype);
501 bool needs_range_check(const TypeInt* size_type, const Node* index) const;
502 Node* create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type, const Type*& element_type);
503 Node* cast_to_speculative_array_type(Node* array, const TypeAryPtr*& array_type, const Type*& element_type);
504 Node* cast_to_profiled_array_type(Node* const array);
505 Node* speculate_non_null_free_array(Node* array, const TypeAryPtr*& array_type);
506 Node* speculate_non_flat_array(Node* array, const TypeAryPtr* array_type);
507 void create_range_check(Node* idx, Node* ary, const TypeInt* sizetype);
508 Node* record_profile_for_speculation_at_array_load(Node* ld);
509
510 void clinit_deopt();
511
512 // Pass current map to exits
513 void return_current(Node* value);
514
515 // Register finalizers on return from Object.<init>
516 void call_register_finalizer();
517
518 // Insert a compiler safepoint into the graph
519 void add_safepoint();
520
521 // Insert a compiler safepoint into the graph, if there is a back-branch.
522 void maybe_add_safepoint(int target_bci) {
// A branch target at or before the current bci is a back-branch and may
// close a loop, so a safepoint is inserted there.
523 if (target_bci <= bci()) {
524 add_safepoint();
525 }
526 }
527
528 // Note: Intrinsic generation routines may be found in library_call.cpp.
534 bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);
535
536 // Helper functions for type checking bytecodes:
537 void do_checkcast();
538 void do_instanceof();
539
540 // Helper functions for shifting & arithmetic
541 Node* floating_point_mod(Node* a, Node* b, BasicType type);
542 void l2f();
543
544 // implementation of _get* and _put* bytecodes
545 void do_getstatic() { do_field_access(true, false); }
546 void do_getfield () { do_field_access(true, true); }
547 void do_putstatic() { do_field_access(false, false); }
548 void do_putfield () { do_field_access(false, true); }
549
550 // common code for making initial checks and forming addresses
551 void do_field_access(bool is_get, bool is_field);
552
553 // common code for actually performing the load or store
554 void do_get_xxx(Node* obj, ciField* field);
555 void do_put_xxx(Node* obj, ciField* field, bool is_field);
556
557 ciType* improve_abstract_inline_type_klass(ciType* field_klass);
558
559 // implementation of object creation bytecodes
560 void do_new();
561 void do_newarray(BasicType elemtype);
562 void do_newarray();
563 void do_multianewarray();
564 Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);
565
566 // implementation of jsr/ret
567 void do_jsr();
568 void do_ret();
569
570 float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
571 float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
572 bool seems_never_taken(float prob) const;
573 bool path_is_suitable_for_uncommon_trap(float prob) const;
574
575 void do_ifnull(BoolTest::mask btest, Node* c);
576 void do_if(BoolTest::mask btest, Node* c, bool can_trap = true, bool new_path = false, Node** ctrl_taken = nullptr, Node** stress_count_mem = nullptr);
577 void do_acmp(BoolTest::mask btest, Node* left, Node* right);
578 void acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region);
579 void acmp_type_check_or_trap(Node** non_null_input, ciKlass* input_type, Deoptimization::DeoptReason);
580 void acmp_type_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region);
581 Node* acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl);
582 public:
583 static IfNode* acmp_fast_path_if_from_substitutable_call(PhaseGVN* phase, CallStaticJavaNode* call);
584 private:
585 int repush_if_args();
586 void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap = true);
587 void sharpen_type_after_if(BoolTest::mask btest,
588 Node* con, const Type* tcon,
589 Node* val, const Type* tval);
590 void maybe_add_predicate_after_if(Block* path);
591 IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
592 void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, bool unc);
593 void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, bool unc);
594 void jump_if_always_fork(int dest_bci_if_true, bool unc);
595
596 friend class SwitchRange;
597 void do_tableswitch();
598 void do_lookupswitch();
599 void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
600 bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
601 void linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);
602
603 // helper function for call statistics
604 void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;
605
606 Node_Notes* make_node_notes(Node_Notes* caller_nn);
|