6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
Summary: Treat a chunk where the allocation has failed as fully used.
Reviewed-by: ysr
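
The idea behind the fix, as a hedged standalone sketch (not HotSpot code; ChunkStats, ewma_weight and sample_allocation are invented names): a NUMA chunk that has just refused an allocation may still look nearly empty, so sampling its used size into the allocation-rate average lets the adaptive resizer conclude the chunk is big enough and stall. Sampling the chunk's full capacity instead keeps the average, and therefore the resizing, moving.

// Hedged sketch, not HotSpot code: ChunkStats, ewma_weight and
// sample_allocation() are hypothetical names. It only illustrates the fix:
// when an allocation in a per-node chunk has failed, feed the chunk's full
// capacity into the allocation-rate average instead of the bytes actually
// used, so the adaptive chunk resizer keeps seeing demand and keeps growing
// the chunk rather than stalling on one that looks almost empty.
#include <cstddef>
#include <cstdio>

struct ChunkStats {
  double avg_alloc_rate    = 0.0;  // exponentially weighted average, in bytes
  double ewma_weight       = 0.2;  // stands in for NUMAChunkResizeWeight
  bool   allocation_failed = false;

  void sample_allocation(size_t used_bytes, size_t capacity_bytes) {
    // On failure, pretend the whole chunk was used (the essence of 6740923).
    size_t sample = allocation_failed ? capacity_bytes : used_bytes;
    allocation_failed = false;
    avg_alloc_rate += ewma_weight * ((double)sample - avg_alloc_rate);
  }
};

int main() {
  ChunkStats c;
  c.sample_allocation(1024, 64 * 1024);  // normal sample: bytes actually used
  c.allocation_failed = true;            // allocate()/cas_allocate() reported a failure
  c.sample_allocation(1024, 64 * 1024);  // this sample uses the full chunk capacity
  std::printf("average allocation rate: %.0f bytes\n", c.avg_alloc_rate);
  return 0;
}
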
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Wed Sep 24 15:34:06 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Sat Sep 27 00:33:13 2008 -0700
@@ -402,7 +402,7 @@
return result;
}
if (!is_tlab &&
- size >= (young_gen()->eden_space()->capacity_in_words() / 2)) {
+ size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
result = old_gen()->allocate(size, is_tlab);
if (result != NULL) {
return result;
--- a/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp Wed Sep 24 15:34:06 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/immutableSpace.hpp Sat Sep 27 00:33:13 2008 -0700
@@ -50,7 +50,8 @@
size_t capacity_in_bytes() const { return capacity_in_words() * HeapWordSize; }
// Size computations. Sizes are in heapwords.
- size_t capacity_in_words() const { return pointer_delta(end(), bottom()); }
+ size_t capacity_in_words() const { return pointer_delta(end(), bottom()); }
+ virtual size_t capacity_in_words(Thread*) const { return capacity_in_words(); }
// Iteration.
virtual void oop_iterate(OopClosure* cl);
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Wed Sep 24 15:34:06 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Sat Sep 27 00:33:13 2008 -0700
@@ -181,6 +181,25 @@
return lgrp_spaces()->at(i)->space()->free_in_bytes();
}
+
+size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
+ guarantee(thr != NULL, "No thread");
+ int lgrp_id = thr->lgrp_id();
+ if (lgrp_id == -1) {
+ if (lgrp_spaces()->length() > 0) {
+ return capacity_in_words() / lgrp_spaces()->length();
+ } else {
+ assert(false, "There should be at least one locality group");
+ return 0;
+ }
+ }
+ int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
+ if (i == -1) {
+ return 0;
+ }
+ return lgrp_spaces()->at(i)->space()->capacity_in_words();
+}
+
// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
@@ -722,7 +741,8 @@
i = os::random() % lgrp_spaces()->length();
}
- MutableSpace *s = lgrp_spaces()->at(i)->space();
+ LGRPSpace* ls = lgrp_spaces()->at(i);
+ MutableSpace *s = ls->space();
HeapWord *p = s->allocate(size);
if (p != NULL) {
@@ -743,6 +763,9 @@
*(int*)i = 0;
}
}
+ if (p == NULL) {
+ ls->set_allocation_failed();
+ }
return p;
}
@@ -761,7 +784,8 @@
if (i == -1) {
i = os::random() % lgrp_spaces()->length();
}
- MutableSpace *s = lgrp_spaces()->at(i)->space();
+ LGRPSpace *ls = lgrp_spaces()->at(i);
+ MutableSpace *s = ls->space();
HeapWord *p = s->cas_allocate(size);
if (p != NULL) {
size_t remainder = pointer_delta(s->end(), p + size);
@@ -790,6 +814,9 @@
*(int*)i = 0;
}
}
+ if (p == NULL) {
+ ls->set_allocation_failed();
+ }
return p;
}
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Wed Sep 24 15:34:06 2008 -0400
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Sat Sep 27 00:33:13 2008 -0700
@@ -60,6 +60,7 @@
MutableSpace* _space;
MemRegion _invalid_region;
AdaptiveWeightedAverage *_alloc_rate;
+ bool _allocation_failed;
struct SpaceStats {
size_t _local_space, _remote_space, _unbiased_space, _uncommited_space;
@@ -81,7 +82,7 @@
char* last_page_scanned() { return _last_page_scanned; }
void set_last_page_scanned(char* p) { _last_page_scanned = p; }
public:
- LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL) {
+ LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) {
_space = new MutableSpace();
_alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
}
@@ -103,8 +104,21 @@
return *(int*)lgrp_id_value == p->lgrp_id();
}
+ // Report a failed allocation.
+ void set_allocation_failed() { _allocation_failed = true; }
+
void sample() {
- alloc_rate()->sample(space()->used_in_bytes());
+ // If there was a failed allocation, make the allocation rate equal
+ // to the size of the whole chunk. This ensures the progress of
+ // the adaptation process.
+ size_t alloc_rate_sample;
+ if (_allocation_failed) {
+ alloc_rate_sample = space()->capacity_in_bytes();
+ _allocation_failed = false;
+ } else {
+ alloc_rate_sample = space()->used_in_bytes();
+ }
+ alloc_rate()->sample(alloc_rate_sample);
}
MemRegion invalid_region() const { return _invalid_region; }
@@ -190,6 +204,9 @@
virtual void ensure_parsability();
virtual size_t used_in_words() const;
virtual size_t free_in_words() const;
+
+ using MutableSpace::capacity_in_words;
+ virtual size_t capacity_in_words(Thread* thr) const;
virtual size_t tlab_capacity(Thread* thr) const;
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
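
For context, a hedged sketch (hypothetical Space/NUMASpace/Thread types, not the HotSpot classes) of the virtual dispatch that the capacity_in_words(Thread*) overload introduces: a plain space reports its whole capacity regardless of the caller, while the NUMA space reports only the chunk owned by the thread's locality group, so the eden-size check in parallelScavengeHeap.cpp compares the request against the space the thread can actually allocate from.

// Hedged sketch with invented names (Space, NUMASpace, should_try_old_gen);
// only the shape of the dispatch mirrors the patch above.
#include <cstddef>
#include <utility>
#include <vector>

struct Thread { int lgrp_id; };

struct Space {
  size_t total_words;
  explicit Space(size_t w) : total_words(w) {}
  virtual ~Space() {}
  size_t capacity_in_words() const { return total_words; }
  // Default per-thread query falls back to the whole space, like the new
  // ImmutableSpace::capacity_in_words(Thread*).
  virtual size_t capacity_in_words(const Thread*) const { return capacity_in_words(); }
};

struct NUMASpace : public Space {
  std::vector<size_t> chunk_words;  // one adaptive chunk per locality group
  NUMASpace(std::vector<size_t> chunks, size_t total)
    : Space(total), chunk_words(std::move(chunks)) {}
  // Like MutableNUMASpace::capacity_in_words(Thread*): report only the chunk
  // belonging to the allocating thread's locality group.
  size_t capacity_in_words(const Thread* thr) const override {
    if (chunk_words.empty()) return 0;
    if (thr == NULL || thr->lgrp_id < 0) return total_words / chunk_words.size();
    return chunk_words[(size_t)thr->lgrp_id % chunk_words.size()];
  }
};

// The spirit of the parallelScavengeHeap.cpp hunk: decide whether a request is
// too large for the thread's share of eden and should go to the old gen.
static bool should_try_old_gen(const Space& eden, const Thread& thr, size_t request_words) {
  return request_words >= eden.capacity_in_words(&thr) / 2;
}

int main() {
  NUMASpace eden({1000, 1000}, 2000);
  Thread t0 = { 0 };
  return should_try_old_gen(eden, t0, 600) ? 0 : 1;  // 600 >= 1000/2, so try the old gen
}
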