--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Jun 24 15:56:12 2010 -0700
@@ -3972,6 +3972,10 @@
void work(int i) {
if (i >= _n_workers) return; // no work needed this round
+
+ double start_time_ms = os::elapsedTime() * 1000.0;
+ _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
+
ResourceMark rm;
HandleMark hm;
@@ -4019,7 +4023,7 @@
double elapsed_ms = (os::elapsedTime()-start)*1000.0;
double term_ms = pss.term_time()*1000.0;
_g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
- _g1h->g1_policy()->record_termination_time(i, term_ms);
+ _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
}
_g1h->g1_policy()->record_thread_age_table(pss.age_table());
_g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@@ -4043,7 +4047,8 @@
double term = pss.term_time();
gclog_or_tty->print(" Elapsed: %7.2f ms.\n"
" Strong roots: %7.2f ms (%6.2f%%)\n"
- " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n",
+ " Termination: %7.2f ms (%6.2f%%) "
+ "(in "SIZE_FORMAT" entries)\n",
elapsed * 1000.0,
strong_roots * 1000.0, (strong_roots*100.0/elapsed),
term * 1000.0, (term*100.0/elapsed),
@@ -4059,6 +4064,8 @@
assert(pss.refs_to_scan() == 0, "Task queue should be empty");
assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+ double end_time_ms = os::elapsedTime() * 1000.0;
+ _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
}
};
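For context, the calls added above bracket each parallel worker's work(i) with wall-clock timestamps so that G1CollectorPolicy can later report per-worker start and end times. A minimal standalone sketch of the same bracketing pattern follows; WorkerPolicy and elapsed_ms() are illustrative stand-ins, not HotSpot APIs.

```cpp
#include <chrono>
#include <vector>

// Illustrative stand-in for the per-worker arrays kept by the policy.
struct WorkerPolicy {
  std::vector<double> start_ms, end_ms;
  explicit WorkerPolicy(int n) : start_ms(n, -1234.0), end_ms(n, -1234.0) {}
  void record_gc_worker_start_time(int i, double ms) { start_ms[i] = ms; }
  void record_gc_worker_end_time(int i, double ms)   { end_ms[i]   = ms; }
};

// Wall-clock time in milliseconds (stand-in for os::elapsedTime() * 1000.0).
static double elapsed_ms() {
  using namespace std::chrono;
  return duration<double, std::milli>(
      steady_clock::now().time_since_epoch()).count();
}

// The pattern the patch introduces: time-stamp worker i on entry and on exit.
void work(WorkerPolicy& policy, int i) {
  policy.record_gc_worker_start_time(i, elapsed_ms());
  // ... root scanning, object copying, termination protocol ...
  policy.record_gc_worker_end_time(i, elapsed_ms());
}
```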
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Jun 24 15:56:12 2010 -0700
@@ -1549,7 +1549,7 @@
int _hash_seed;
int _queue_num;
- int _term_attempts;
+ size_t _term_attempts;
#if G1_DETAILED_STATS
int _pushes, _pops, _steals, _steal_attempts;
int _overflow_pushes;
@@ -1727,8 +1727,8 @@
int* hash_seed() { return &_hash_seed; }
int queue_num() { return _queue_num; }
- int term_attempts() { return _term_attempts; }
- void note_term_attempt() { _term_attempts++; }
+ size_t term_attempts() { return _term_attempts; }
+ void note_term_attempt() { _term_attempts++; }
#if G1_DETAILED_STATS
int pushes() { return _pushes; }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Jun 24 15:56:12 2010 -0700
@@ -231,20 +231,21 @@
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
+ _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
_par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
_par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
- _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
- _par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads];
_par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
_par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads];
_par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
_par_last_termination_times_ms = new double[_parallel_gc_threads];
+ _par_last_termination_attempts = new double[_parallel_gc_threads];
+ _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
// start conservatively
_expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
@@ -274,10 +275,64 @@
// </NEW PREDICTION>
- double time_slice = (double) GCPauseIntervalMillis / 1000.0;
+ // Below, we might need to calculate the pause time target based on
+ // the pause interval. When we do so we are going to give G1 maximum
+ // flexibility and allow it to do pauses when it needs to. So, we'll
+ // arrange for the pause interval to be the pause time target + 1 to
+ // ensure that a) the pause time target is maximized with respect to
+ // the pause interval and b) we maintain the invariant that pause
+ // time target < pause interval. If the user does not want this
+ // maximum flexibility, they will have to set the pause interval
+ // explicitly.
+
+ // First make sure that, if either parameter is set, its value is
+ // reasonable.
+ if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+ if (MaxGCPauseMillis < 1) {
+ vm_exit_during_initialization("MaxGCPauseMillis should be "
+ "greater than 0");
+ }
+ }
+ if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+ if (GCPauseIntervalMillis < 1) {
+ vm_exit_during_initialization("GCPauseIntervalMillis should be "
+ "greater than 0");
+ }
+ }
+
+ // Then, if the pause time target parameter was not set, set it to
+ // the default value.
+ if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+ if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+ // The default pause time target in G1 is 200ms
+ FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
+ } else {
+ // We do not allow the pause interval to be set without the
+ // pause time target
+ vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
+ "without setting MaxGCPauseMillis");
+ }
+ }
+
+ // Then, if the interval parameter was not set, set it according to
+ // the pause time target (this will also deal with the case when the
+ // pause time target is the default value).
+ if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+ FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
+ }
+
+ // Finally, make sure that the two parameters are consistent.
+ if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
+ char buffer[256];
+ jio_snprintf(buffer, 256,
+ "MaxGCPauseMillis (%u) should be less than "
+ "GCPauseIntervalMillis (%u)",
+ MaxGCPauseMillis, GCPauseIntervalMillis);
+ vm_exit_during_initialization(buffer);
+ }
+
double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
- guarantee(max_gc_time < time_slice,
- "Max GC time should not be greater than the time slice");
+ double time_slice = (double) GCPauseIntervalMillis / 1000.0;
_mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
_sigma = (double) G1ConfidencePercent / 100.0;
@@ -782,16 +837,17 @@
// if they are not set properly
for (int i = 0; i < _parallel_gc_threads; ++i) {
- _par_last_ext_root_scan_times_ms[i] = -666.0;
- _par_last_mark_stack_scan_times_ms[i] = -666.0;
- _par_last_update_rs_start_times_ms[i] = -666.0;
- _par_last_update_rs_times_ms[i] = -666.0;
- _par_last_update_rs_processed_buffers[i] = -666.0;
- _par_last_scan_rs_start_times_ms[i] = -666.0;
- _par_last_scan_rs_times_ms[i] = -666.0;
- _par_last_scan_new_refs_times_ms[i] = -666.0;
- _par_last_obj_copy_times_ms[i] = -666.0;
- _par_last_termination_times_ms[i] = -666.0;
+ _par_last_gc_worker_start_times_ms[i] = -1234.0;
+ _par_last_ext_root_scan_times_ms[i] = -1234.0;
+ _par_last_mark_stack_scan_times_ms[i] = -1234.0;
+ _par_last_update_rs_times_ms[i] = -1234.0;
+ _par_last_update_rs_processed_buffers[i] = -1234.0;
+ _par_last_scan_rs_times_ms[i] = -1234.0;
+ _par_last_scan_new_refs_times_ms[i] = -1234.0;
+ _par_last_obj_copy_times_ms[i] = -1234.0;
+ _par_last_termination_times_ms[i] = -1234.0;
+ _par_last_termination_attempts[i] = -1234.0;
+ _par_last_gc_worker_end_times_ms[i] = -1234.0;
}
#endif
@@ -942,9 +998,9 @@
return sum;
}
-void G1CollectorPolicy::print_par_stats (int level,
- const char* str,
- double* data,
+void G1CollectorPolicy::print_par_stats(int level,
+ const char* str,
+ double* data,
bool summary) {
double min = data[0], max = data[0];
double total = 0.0;
@@ -973,10 +1029,10 @@
gclog_or_tty->print_cr("]");
}
-void G1CollectorPolicy::print_par_buffers (int level,
- const char* str,
- double* data,
- bool summary) {
+void G1CollectorPolicy::print_par_sizes(int level,
+ const char* str,
+ double* data,
+ bool summary) {
double min = data[0], max = data[0];
double total = 0.0;
int j;
@@ -1321,15 +1377,22 @@
}
if (parallel) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
- print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false);
+ print_par_stats(2, "GC Worker Start Time",
+ _par_last_gc_worker_start_times_ms, false);
print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
- print_par_buffers(3, "Processed Buffers",
- _par_last_update_rs_processed_buffers, true);
- print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
- print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
+ print_par_sizes(3, "Processed Buffers",
+ _par_last_update_rs_processed_buffers, true);
+ print_par_stats(2, "Ext Root Scanning",
+ _par_last_ext_root_scan_times_ms);
+ print_par_stats(2, "Mark Stack Scanning",
+ _par_last_mark_stack_scan_times_ms);
print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
print_par_stats(2, "Termination", _par_last_termination_times_ms);
+ print_par_sizes(3, "Termination Attempts",
+ _par_last_termination_attempts, true);
+ print_par_stats(2, "GC Worker End Time",
+ _par_last_gc_worker_end_times_ms, false);
print_stats(2, "Other", parallel_other_time);
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
} else {
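The flag handling added to the constructor above can be read as a small, self-contained validation routine: reject non-positive values, default MaxGCPauseMillis to 200 ms, refuse an interval without a target, derive a missing interval as target + 1, and finally enforce target < interval. A sketch of that logic outside HotSpot, where PauseFlags and the *_is_default fields merely stand in for the real FLAG_IS_DEFAULT/FLAG_SET_DEFAULT machinery:

```cpp
#include <cstdio>
#include <cstdlib>

struct PauseFlags {
  unsigned max_gc_pause_ms;        // MaxGCPauseMillis
  unsigned gc_pause_interval_ms;   // GCPauseIntervalMillis
  bool pause_is_default;
  bool interval_is_default;
};

// Mirrors the checks added to G1CollectorPolicy's constructor.
void validate_pause_flags(PauseFlags& f) {
  if (!f.pause_is_default && f.max_gc_pause_ms < 1) {
    std::fprintf(stderr, "MaxGCPauseMillis should be greater than 0\n");
    std::exit(1);
  }
  if (!f.interval_is_default && f.gc_pause_interval_ms < 1) {
    std::fprintf(stderr, "GCPauseIntervalMillis should be greater than 0\n");
    std::exit(1);
  }
  if (f.pause_is_default) {
    if (f.interval_is_default) {
      f.max_gc_pause_ms = 200;   // G1's default pause time target
    } else {
      std::fprintf(stderr, "GCPauseIntervalMillis cannot be set "
                           "without setting MaxGCPauseMillis\n");
      std::exit(1);
    }
  }
  if (f.interval_is_default) {
    // Maximize the target w.r.t. the interval while keeping target < interval.
    f.gc_pause_interval_ms = f.max_gc_pause_ms + 1;
  }
  if (f.max_gc_pause_ms >= f.gc_pause_interval_ms) {
    std::fprintf(stderr, "MaxGCPauseMillis (%u) should be less than "
                         "GCPauseIntervalMillis (%u)\n",
                 f.max_gc_pause_ms, f.gc_pause_interval_ms);
    std::exit(1);
  }
}
```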
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Jun 24 15:56:12 2010 -0700
@@ -171,16 +171,17 @@
double* _cur_aux_times_ms;
bool* _cur_aux_times_set;
+ double* _par_last_gc_worker_start_times_ms;
double* _par_last_ext_root_scan_times_ms;
double* _par_last_mark_stack_scan_times_ms;
- double* _par_last_update_rs_start_times_ms;
double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers;
- double* _par_last_scan_rs_start_times_ms;
double* _par_last_scan_rs_times_ms;
double* _par_last_scan_new_refs_times_ms;
double* _par_last_obj_copy_times_ms;
double* _par_last_termination_times_ms;
+ double* _par_last_termination_attempts;
+ double* _par_last_gc_worker_end_times_ms;
// indicates that we are in young GC mode
bool _in_young_gc_mode;
@@ -559,13 +560,14 @@
}
protected:
- void print_stats (int level, const char* str, double value);
- void print_stats (int level, const char* str, int value);
- void print_par_stats (int level, const char* str, double* data) {
+ void print_stats(int level, const char* str, double value);
+ void print_stats(int level, const char* str, int value);
+
+ void print_par_stats(int level, const char* str, double* data) {
print_par_stats(level, str, data, true);
}
- void print_par_stats (int level, const char* str, double* data, bool summary);
- void print_par_buffers (int level, const char* str, double* data, bool summary);
+ void print_par_stats(int level, const char* str, double* data, bool summary);
+ void print_par_sizes(int level, const char* str, double* data, bool summary);
void check_other_times(int level,
NumberSeq* other_times_ms,
@@ -891,6 +893,10 @@
virtual void record_full_collection_start();
virtual void record_full_collection_end();
+ void record_gc_worker_start_time(int worker_i, double ms) {
+ _par_last_gc_worker_start_times_ms[worker_i] = ms;
+ }
+
void record_ext_root_scan_time(int worker_i, double ms) {
_par_last_ext_root_scan_times_ms[worker_i] = ms;
}
@@ -912,10 +918,6 @@
_all_mod_union_times_ms->add(ms);
}
- void record_update_rs_start_time(int thread, double ms) {
- _par_last_update_rs_start_times_ms[thread] = ms;
- }
-
void record_update_rs_time(int thread, double ms) {
_par_last_update_rs_times_ms[thread] = ms;
}
@@ -925,10 +927,6 @@
_par_last_update_rs_processed_buffers[thread] = processed_buffers;
}
- void record_scan_rs_start_time(int thread, double ms) {
- _par_last_scan_rs_start_times_ms[thread] = ms;
- }
-
void record_scan_rs_time(int thread, double ms) {
_par_last_scan_rs_times_ms[thread] = ms;
}
@@ -953,16 +951,13 @@
_par_last_obj_copy_times_ms[thread] += ms;
}
- void record_obj_copy_time(double ms) {
- record_obj_copy_time(0, ms);
+ void record_termination(int thread, double ms, size_t attempts) {
+ _par_last_termination_times_ms[thread] = ms;
+ _par_last_termination_attempts[thread] = (double) attempts;
}
- void record_termination_time(int thread, double ms) {
- _par_last_termination_times_ms[thread] = ms;
- }
-
- void record_termination_time(double ms) {
- record_termination_time(0, ms);
+ void record_gc_worker_end_time(int worker_i, double ms) {
+ _par_last_gc_worker_end_times_ms[worker_i] = ms;
}
void record_pause_time_ms(double ms) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Jun 24 15:56:12 2010 -0700
@@ -303,7 +303,6 @@
assert( _cards_scanned != NULL, "invariant" );
_cards_scanned[worker_i] = scanRScl.cards_done();
- _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
_g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
}
@@ -311,8 +310,6 @@
ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
double start = os::elapsedTime();
- _g1p->record_update_rs_start_time(worker_i, start * 1000.0);
-
// Apply the appropriate closure to all remaining log entries.
_g1->iterate_dirty_card_closure(false, worker_i);
// Now there should be no dirty cards.
@@ -471,7 +468,6 @@
updateRS(worker_i);
scanNewRefsRS(oc, worker_i);
} else {
- _g1p->record_update_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
_g1p->record_update_rs_processed_buffers(worker_i, 0.0);
_g1p->record_update_rs_time(worker_i, 0.0);
_g1p->record_scan_new_refs_time(worker_i, 0.0);
@@ -479,7 +475,6 @@
if (G1UseParallelRSetScanning || (worker_i == 0)) {
scanRS(oc, worker_i);
} else {
- _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
_g1p->record_scan_rs_time(worker_i, 0.0);
}
} else {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Thu Jun 24 15:56:12 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -566,14 +566,14 @@
#endif
// Commit new or uncommit old pages, if necessary.
- resize_commit_uncommit(changed_region, new_region);
+ if (resize_commit_uncommit(changed_region, new_region)) {
+ // Set the new start of the committed region
+ resize_update_committed_table(changed_region, new_region);
+ }
// Update card table entries
resize_update_card_table_entries(changed_region, new_region);
- // Set the new start of the committed region
- resize_update_committed_table(changed_region, new_region);
-
// Update the covered region
resize_update_covered_table(changed_region, new_region);
@@ -604,8 +604,9 @@
debug_only(verify_guard();)
}
-void CardTableExtension::resize_commit_uncommit(int changed_region,
+bool CardTableExtension::resize_commit_uncommit(int changed_region,
MemRegion new_region) {
+ bool result = false;
// Commit new or uncommit old pages, if necessary.
MemRegion cur_committed = _committed[changed_region];
assert(_covered[changed_region].end() == new_region.end(),
@@ -675,20 +676,31 @@
"card table expansion");
}
}
+ result = true;
} else if (new_start_aligned > cur_committed.start()) {
// Shrink the committed region
+#if 0 // uncommitting space is currently unsafe because of the interactions
+ // of growing and shrinking regions. One region A can uncommit space
+ // that it owns but that may still be in use by another region B.
+ // Region B has not committed the space because it was already
+ // committed by region A.
MemRegion uncommit_region = committed_unique_to_self(changed_region,
MemRegion(cur_committed.start(), new_start_aligned));
if (!uncommit_region.is_empty()) {
if (!os::uncommit_memory((char*)uncommit_region.start(),
uncommit_region.byte_size())) {
- vm_exit_out_of_memory(uncommit_region.byte_size(),
- "card table contraction");
+ // If the uncommit fails, ignore it. Let the
+ // committed table resizing proceed even though the
+ // committed table will overstate the committed space.
}
}
+#else
+ assert(!result, "Should be false with current workaround");
+#endif
}
assert(_committed[changed_region].end() == cur_committed.end(),
"end should not change");
+ return result;
}
void CardTableExtension::resize_update_committed_table(int changed_region,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Thu Jun 24 15:56:12 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,9 @@
class CardTableExtension : public CardTableModRefBS {
private:
// Support methods for resizing the card table.
- void resize_commit_uncommit(int changed_region, MemRegion new_region);
+ // resize_commit_uncommit() returns true if the pages were committed or
+ // uncommitted
+ bool resize_commit_uncommit(int changed_region, MemRegion new_region);
void resize_update_card_table_entries(int changed_region,
MemRegion new_region);
void resize_update_committed_table(int changed_region, MemRegion new_region);
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Thu Jun 24 15:56:12 2010 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -284,12 +284,19 @@
committed_unique_to_self(ind, MemRegion(new_end_aligned,
cur_committed.end()));
if (!uncommit_region.is_empty()) {
- if (!os::uncommit_memory((char*)uncommit_region.start(),
- uncommit_region.byte_size())) {
- assert(false, "Card table contraction failed");
- // The call failed so don't change the end of the
- // committed region. This is better than taking the
- // VM down.
+ // It is not safe to uncommit cards if the boundary between
+ // the generations is moving. A shrink can uncommit cards
+ // owned by generation A but being used by generation B.
+ if (!UseAdaptiveGCBoundary) {
+ if (!os::uncommit_memory((char*)uncommit_region.start(),
+ uncommit_region.byte_size())) {
+ assert(false, "Card table contraction failed");
+ // The call failed so don't change the end of the
+ // committed region. This is better than taking the
+ // VM down.
+ new_end_aligned = _committed[ind].end();
+ }
+ } else {
new_end_aligned = _committed[ind].end();
}
}
@@ -297,6 +304,19 @@
// In any case, we can reset the end of the current committed entry.
_committed[ind].set_end(new_end_aligned);
+#ifdef ASSERT
+ // Check that the last card in the new region is committed according
+ // to the tables.
+ bool covered = false;
+ for (int cr = 0; cr < _cur_covered_regions; cr++) {
+ if (_committed[cr].contains(new_end - 1)) {
+ covered = true;
+ break;
+ }
+ }
+ assert(covered, "Card for end of new region not committed");
+#endif
+
// The default of 0 is not necessarily clean cards.
jbyte* entry;
if (old_region.last() < _whole_heap.start()) {
@@ -354,6 +374,9 @@
addr_for((jbyte*) _committed[ind].start()),
addr_for((jbyte*) _committed[ind].last()));
}
+ // Touch the last card of the covered region to show that it
+ // is committed (or SEGV).
+ debug_only(*byte_for(_covered[ind].last());)
debug_only(verify_guard();)
}
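The guard around os::uncommit_memory() above exists because committed extents are page-aligned outward, so two adjacent covered regions can end up sharing a committed page; when the generation boundary moves (UseAdaptiveGCBoundary), uncommitting that page on behalf of the shrinking region would unmap memory its neighbour still relies on. A toy illustration of the overlap, using a made-up page size and addresses:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t page = 4096;        // illustrative page size
  const uintptr_t boundary = 0x12800; // boundary between two covered regions,
                                      // deliberately not page-aligned

  // Each region's committed extent is rounded outward to page granularity,
  // so both regions claim the page containing the boundary.
  uintptr_t a_commit_end   = (boundary + page - 1) & ~(page - 1); // 0x13000
  uintptr_t b_commit_start =  boundary             & ~(page - 1); // 0x12000

  if (b_commit_start < a_commit_end) {
    std::printf("shared committed page(s): [%#lx, %#lx)\n"
                "uncommitting them for one region would unmap memory\n"
                "the adjacent region still uses\n",
                (unsigned long)b_commit_start, (unsigned long)a_commit_end);
  }
  return 0;
}
```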
--- a/hotspot/src/share/vm/runtime/arguments.cpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Thu Jun 24 15:56:12 2010 -0700
@@ -1376,11 +1376,6 @@
}
no_shared_spaces();
- // Set the maximum pause time goal to be a reasonable default.
- if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
- FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
- }
-
if (FLAG_IS_DEFAULT(MarkStackSize)) {
FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
}
--- a/hotspot/src/share/vm/runtime/globals.hpp Tue Jun 15 18:07:27 2010 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Thu Jun 24 15:56:12 2010 -0700
@@ -1975,7 +1975,7 @@
"Adaptive size policy maximum GC pause time goal in msec, " \
"or (G1 Only) the max. GC time per MMU time slice") \
\
- product(intx, GCPauseIntervalMillis, 500, \
+ product(uintx, GCPauseIntervalMillis, 0, \
"Time slice for MMU specification") \
\
product(uintx, MaxGCMinorPauseMillis, max_uintx, \