< prev index next >

src/hotspot/share/gc/g1/g1Policy.hpp

Print this page




  27 
  28 #include "gc/g1/g1CollectorPolicy.hpp"
  29 #include "gc/g1/g1CollectorState.hpp"
  30 #include "gc/g1/g1GCPhaseTimes.hpp"
  31 #include "gc/g1/g1InCSetState.hpp"
  32 #include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
  33 #include "gc/g1/g1MMUTracker.hpp"
  34 #include "gc/g1/g1RemSetTrackingPolicy.hpp"
  35 #include "gc/g1/g1Predictions.hpp"
  36 #include "gc/g1/g1YoungGenSizer.hpp"
  37 #include "gc/shared/gcCause.hpp"
  38 #include "utilities/pair.hpp"
  39 
  40 // A G1Policy makes policy decisions that determine the
  41 // characteristics of the collector.  Examples include:
  42 //   * choice of collection set.
  43 //   * when to collect.
  44 
  45 class HeapRegion;
  46 class G1CollectionSet;
  47 class G1CollectionSetCandidates;
  48 class G1CollectionSetChooser;
  49 class G1IHOPControl;
  50 class G1Analytics;
  51 class G1SurvivorRegions;
  52 class G1YoungGenSizer;
  53 class GCPolicyCounters;
  54 class STWGCTimer;
  55 
  56 class G1Policy: public CHeapObj<mtGC> {
  57  private:
  58 
  59   static G1IHOPControl* create_ihop_control(const G1Predictions* predictor);
  60   // Update the IHOP control with necessary statistics.
  61   void update_ihop_prediction(double mutator_time_s,
  62                               size_t mutator_alloc_bytes,
  63                               size_t young_gen_size,
  64                               bool this_gc_was_young_only);
  65   void report_ihop_statistics();
  66 
  67   G1Predictions _predictor;


 328   // Record start, end, and completion of cleanup.
 329   void record_concurrent_mark_cleanup_start();
 330   void record_concurrent_mark_cleanup_end();
 331 
 332   void print_phases();
 333 
 334   // Record how much space we copied during a GC. This is typically
 335   // called when a GC alloc region is being retired.
  336   void record_bytes_copied_during_gc(size_t bytes) {
  337     _bytes_copied_during_gc += bytes;  // accumulate; presumably reset at pause start — not visible in this chunk
  338   }
 339 
 340   // The amount of space we copied during a GC.
  341   size_t bytes_copied_during_gc() const {
  342     return _bytes_copied_during_gc;  // bytes, as accumulated via record_bytes_copied_during_gc()
  343   }
 344 
 345   bool next_gc_should_be_mixed(const char* true_action_str,
 346                                const char* false_action_str) const;
 347 
 348   // Calculate and return the number of initial and optional old gen regions from
 349   // the given collection set candidates and the remaining time.
 350   void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
 351                                             double time_remaining_ms,
 352                                             uint& num_initial_regions,
 353                                             uint& num_optional_regions);
 354 
 355   // Calculate the number of optional regions from the given collection set candidates,
 356   // the remaining time and the maximum number of these regions and return the number
 357   // of actually selected regions in num_optional_regions.
 358   void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
 359                                                  uint const max_optional_regions,
 360                                                  double time_remaining_ms,
 361                                                  uint& num_optional_regions);
 362 
 363 private:
 364   // Set the state to start a concurrent marking cycle and clear
 365   // _initiate_conc_mark_if_possible because it has now been
 366   // acted on.
 367   void initiate_conc_mark();
 368 
 369 public:
 370   // This sets the initiate_conc_mark_if_possible() flag to start a
 371   // new cycle, as long as we are not already in one. It's best if it
 372   // is called during a safepoint when the test whether a cycle is in
 373   // progress or not is stable.
 374   bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
 375 
 376   // This is called at the very beginning of an evacuation pause (it
 377   // has to be the first thing that the pause does). If
 378   // initiate_conc_mark_if_possible() is true, and the concurrent
 379   // marking thread has completed its work during the previous cycle,
  380   // it will set in_initial_mark_gc() to true so that the pause does
 381   // the initial-mark work and start a marking cycle.
 382   void decide_on_conc_mark_initiation();
 383 
  384   void finished_recalculating_age_indexes(bool is_survivors) {
          // Forward the notification to exactly one survival-rate group:
          // the survivor group when is_survivors is true, otherwise the
          // short-lived (eden) group.
  385     if (is_survivors) {
  386       _survivor_surv_rate_group->finished_recalculating_age_indexes();
  387     } else {
  388       _short_lived_surv_rate_group->finished_recalculating_age_indexes();
  389     }
  390   }
 391 
 392   size_t young_list_target_length() const { return _young_list_target_length; }
 393 
 394   bool should_allocate_mutator_region() const;
 395 
 396   bool can_expand_young_list() const;
 397 
  398   uint young_list_max_length() const {
  399     return _young_list_max_length;  // hard upper bound; cf. young_list_target_length()
  400   }
 401 
 402   bool use_adaptive_young_list_length() const;
 403 
 404   void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);
 405 
 406 private:
 407   //
 408   // Survivor regions policy.
 409   //
 410 
 411   // Current tenuring threshold, set to 0 if the collector reaches the
  412   // maximum number of survivor regions.
 413   uint _tenuring_threshold;
 414 
 415   // The limit on the number of regions allocated for survivors.
 416   uint _max_survivor_regions;
 417 
 418   AgeTable _survivors_age_table;
 419 
 420   size_t desired_survivor_size(uint max_regions) const;
 421 
 422   // Fraction used when predicting how many optional regions to include in
 423   // the CSet. This fraction of the available time is used for optional regions,
 424   // the rest is used to add old regions to the normal CSet.
 425   double optional_prediction_fraction() { return 0.2; }
 426 
 427 public:
 428   // Fraction used when evacuating the optional regions. This fraction of the
 429   // remaining time is used to choose what regions to include in the evacuation.
 430   double optional_evacuation_fraction() { return 0.75; }
 431 
 432   uint tenuring_threshold() const { return _tenuring_threshold; }
 433 
  434   uint max_survivor_regions() {
          // NOTE(review): accessor could be declared const — it only reads state.
  435     return _max_survivor_regions;  // limit on regions allocated for survivors
  436   }
 437 
  438   void note_start_adding_survivor_regions() {
  439     _survivor_surv_rate_group->start_adding_regions();  // delegate to the survivor survival-rate group
  440   }
 441 
  442   void note_stop_adding_survivor_regions() {
  443     _survivor_surv_rate_group->stop_adding_regions();  // delegate to the survivor survival-rate group
  444   }
 445 
 446   void record_age_table(AgeTable* age_table) {
 447     _survivors_age_table.merge(age_table);


  27 
  28 #include "gc/g1/g1CollectorPolicy.hpp"
  29 #include "gc/g1/g1CollectorState.hpp"
  30 #include "gc/g1/g1GCPhaseTimes.hpp"
  31 #include "gc/g1/g1InCSetState.hpp"
  32 #include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
  33 #include "gc/g1/g1MMUTracker.hpp"
  34 #include "gc/g1/g1RemSetTrackingPolicy.hpp"
  35 #include "gc/g1/g1Predictions.hpp"
  36 #include "gc/g1/g1YoungGenSizer.hpp"
  37 #include "gc/shared/gcCause.hpp"
  38 #include "utilities/pair.hpp"
  39 
  40 // A G1Policy makes policy decisions that determine the
  41 // characteristics of the collector.  Examples include:
  42 //   * choice of collection set.
  43 //   * when to collect.
  44 
  45 class HeapRegion;
  46 class G1CollectionSet;

  47 class G1CollectionSetChooser;
  48 class G1IHOPControl;
  49 class G1Analytics;
  50 class G1SurvivorRegions;
  51 class G1YoungGenSizer;
  52 class GCPolicyCounters;
  53 class STWGCTimer;
  54 
  55 class G1Policy: public CHeapObj<mtGC> {
  56  private:
  57 
  58   static G1IHOPControl* create_ihop_control(const G1Predictions* predictor);
  59   // Update the IHOP control with necessary statistics.
  60   void update_ihop_prediction(double mutator_time_s,
  61                               size_t mutator_alloc_bytes,
  62                               size_t young_gen_size,
  63                               bool this_gc_was_young_only);
  64   void report_ihop_statistics();
  65 
  66   G1Predictions _predictor;


 327   // Record start, end, and completion of cleanup.
 328   void record_concurrent_mark_cleanup_start();
 329   void record_concurrent_mark_cleanup_end();
 330 
 331   void print_phases();
 332 
 333   // Record how much space we copied during a GC. This is typically
 334   // called when a GC alloc region is being retired.
  335   void record_bytes_copied_during_gc(size_t bytes) {
  336     _bytes_copied_during_gc += bytes;  // accumulate; presumably reset at pause start — not visible in this chunk
  337   }
 338 
 339   // The amount of space we copied during a GC.
  340   size_t bytes_copied_during_gc() const {
  341     return _bytes_copied_during_gc;  // bytes, as accumulated via record_bytes_copied_during_gc()
  342   }
 343 
 344   bool next_gc_should_be_mixed(const char* true_action_str,
 345                                const char* false_action_str) const;
 346 
 347   uint finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);














 348 private:
 349   // Set the state to start a concurrent marking cycle and clear
 350   // _initiate_conc_mark_if_possible because it has now been
 351   // acted on.
 352   void initiate_conc_mark();
 353 
 354 public:
 355   // This sets the initiate_conc_mark_if_possible() flag to start a
 356   // new cycle, as long as we are not already in one. It's best if it
 357   // is called during a safepoint when the test whether a cycle is in
 358   // progress or not is stable.
 359   bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
 360 
 361   // This is called at the very beginning of an evacuation pause (it
 362   // has to be the first thing that the pause does). If
 363   // initiate_conc_mark_if_possible() is true, and the concurrent
 364   // marking thread has completed its work during the previous cycle,
  365   // it will set in_initial_mark_gc() to true so that the pause does
 366   // the initial-mark work and start a marking cycle.
 367   void decide_on_conc_mark_initiation();
 368 
  369   void finished_recalculating_age_indexes(bool is_survivors) {
          // Forward the notification to exactly one survival-rate group:
          // the survivor group when is_survivors is true, otherwise the
          // short-lived (eden) group.
  370     if (is_survivors) {
  371       _survivor_surv_rate_group->finished_recalculating_age_indexes();
  372     } else {
  373       _short_lived_surv_rate_group->finished_recalculating_age_indexes();
  374     }
  375   }
 376 
 377   size_t young_list_target_length() const { return _young_list_target_length; }
 378 
 379   bool should_allocate_mutator_region() const;
 380 
 381   bool can_expand_young_list() const;
 382 
  383   uint young_list_max_length() const {
  384     return _young_list_max_length;  // hard upper bound; cf. young_list_target_length()
  385   }
 386 
 387   bool adaptive_young_list_length() const;
 388 
 389   void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);
 390 
 391 private:
 392   //
 393   // Survivor regions policy.
 394   //
 395 
 396   // Current tenuring threshold, set to 0 if the collector reaches the
  397   // maximum number of survivor regions.
 398   uint _tenuring_threshold;
 399 
 400   // The limit on the number of regions allocated for survivors.
 401   uint _max_survivor_regions;
 402 
 403   AgeTable _survivors_age_table;
 404 
 405   size_t desired_survivor_size(uint max_regions) const;
 406 public:
 407   // Fraction used when predicting how many optional regions to include in
 408   // the CSet. This fraction of the available time is used for optional regions,
 409   // the rest is used to add old regions to the normal CSet.
 410   double optional_prediction_fraction() { return 0.2; }


 411   // Fraction used when evacuating the optional regions. This fraction of the
 412   // remaining time is used to choose what regions to include in the evacuation.
 413   double optional_evacuation_fraction() { return 0.75; }
 414 
 415   uint tenuring_threshold() const { return _tenuring_threshold; }
 416 
  417   uint max_survivor_regions() {
          // NOTE(review): accessor could be declared const — it only reads state.
  418     return _max_survivor_regions;  // limit on regions allocated for survivors
  419   }
 420 
  421   void note_start_adding_survivor_regions() {
  422     _survivor_surv_rate_group->start_adding_regions();  // delegate to the survivor survival-rate group
  423   }
 424 
  425   void note_stop_adding_survivor_regions() {
  426     _survivor_surv_rate_group->stop_adding_regions();  // delegate to the survivor survival-rate group
  427   }
 428 
 429   void record_age_table(AgeTable* age_table) {
 430     _survivors_age_table.merge(age_table);
< prev index next >