hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
author brutisso
Fri, 23 Mar 2012 15:28:24 +0100
changeset 12229 c34a85c8f5aa
parent 11402 739e52129c84
child 12379 2cf45b79ce3a
permissions -rw-r--r--
7103665: HeapWord*ParallelScavengeHeap::failed_mem_allocate(unsigned long,bool)+0x97
Summary: Make sure that MutableNUMASpace::ensure_parsability() only calls CollectedHeap::fill_with_object() with valid sizes and make sure CollectedHeap::filler_array_max_size() returns a value that can be converted to an int without overflow
Reviewed-by: azeemj, jmasa, iveresov

/*
 * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif


MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
  _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_unused_area_complete() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_region(MemRegion mr) {
  // This method should do nothing because numa spaces are not mangled.
}
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
  assert(false, "Do not mangle MutableNUMASpace's");
}
void MutableNUMASpace::set_top_for_allocations() {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area_complete() {
  // This method should do nothing.
}
#endif  // NOT_PRODUCT

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parseability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        intptr_t cur_top = (intptr_t)s->top();
        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
        while (words_left_to_fill > 0) {
          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
          assert(words_to_fill >= CollectedHeap::min_fill_size(),
            err_msg("Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
            words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
          if (!os::numa_has_static_binding()) {
            size_t touched_words = words_to_fill;
#ifndef ASSERT
            if (!ZapUnusedHeapArea) {
              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                touched_words);
            }
#endif
            MemRegion invalid;
            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
            if (crossing_start != crossing_end) {
              // If the object header crossed a small page boundary we mark the area
              // as invalid, rounding it to a page_size().
              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
              invalid = MemRegion(start, end);
            }

            ls->add_invalid_region(invalid);
          }
          cur_top = cur_top + (words_to_fill * HeapWordSize);
          words_left_to_fill -= words_to_fill;
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}
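
// Note on ensure_parsability() above: each [top(), end()) gap is filled with a
// sequence of dead filler objects, none larger than
// CollectedHeap::filler_array_max_size() words, so that every
// CollectedHeap::fill_with_object() call receives a valid size (see 7103665 in
// the changeset summary).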

size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}


size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location; the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}


size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology had changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}
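
// Note on update_layout() above: lgrp_ids is a temporary C-heap array. Spaces are
// appended for locality groups that appear and deleted for groups that vanish,
// and after a real topology change every JavaThread's lgrp_id is reset to -1 so
// the home group is re-evaluated on that thread's next allocation.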

// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
  }
}

// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    // And clear the alloc-rate statistics.
    // In future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      MutableSpace *s = ls->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
      ls->clear_alloc_rate();
    }
    // A NUMA space is never mangled
    initialize(region(),
               SpaceDecorator::Clear,
               SpaceDecorator::DontMangle);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      // A NUMA space is never mangled
      initialize(region(),
                 SpaceDecorator::Clear,
                 SpaceDecorator::DontMangle);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}
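
// Note on update() above: a topology change zero-sizes all chunks and clears the
// allocation-rate samples before re-initializing the space; otherwise the space is
// re-initialized only if an invalid region was recorded or adaptive chunk sizing
// still has unprocessed samples.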

// Scan pages. Free pages that have smaller size or wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count)
{
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}

// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on a sequence of i's from 0 to
// lgrp_spaces()->length().
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      size_t upper_bound = pages_available * page_size();
      if (upper_bound > limit &&
          current_chunk_size(i) < upper_bound - limit) {
        // The resulting upper bound should not exceed the available
        // amount of memory (pages_available * page_size()).
        upper_bound = current_chunk_size(i) + limit;
      }
      chunk_size = MIN2(chunk_size, upper_bound);
    } else {
      size_t lower_bound = page_size();
      if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
        lower_bound = current_chunk_size(i) - limit;
      }
      chunk_size = MAX2(chunk_size, lower_bound);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}
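
// Note on adaptive_chunk_size() above: the result is the allocation-rate weighted
// share of the pages still available, floored at one page; when a non-zero 'limit'
// is supplied the new size is additionally kept within 'limit' bytes of
// current_chunk_size(i), so chunk boundaries move gradually between adaptation
// cycles.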

// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there bottom?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
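      // p is intersection.start() rounded up to a large-page boundary. If p is
      // still inside new_region, the [intersection.start(), p) part of the
      // intersection is given up to the bottom tail so that the tail ends on a
      // large-page boundary.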
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there top?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}

// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. If the invalid region lies strictly inside the
// intersection it cannot be merged away; in that case it is returned
// non-empty, aligned to the page_size() boundary.
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
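  // Four cases are distinguished below: the invalid region overlaps the start
  // of the intersection, overlaps its end, covers it completely, or lies
  // strictly inside it. Only the last case leaves a non-empty invalid region.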
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else
    if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
      *intersection = MemRegion(intersection->start(), invalid_region->start());
      *invalid_region = MemRegion();
    } else
      if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
        *intersection = MemRegion(new_region.start(), new_region.start());
        *invalid_region = MemRegion();
      } else
        if (intersection->contains(invalid_region)) {
            // That's the only case where we have to make an additional bias_region() call.
            HeapWord* start = invalid_region->start();
            HeapWord* end = invalid_region->end();
            if (UseLargePages && page_size() >= alignment()) {
              HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
              if (new_region.contains(p)) {
                start = p;
              }
              p = (HeapWord*)round_to((intptr_t) end, alignment());
              if (new_region.contains(end)) {
                end = p;
              }
            }
            if (intersection->start() > start) {
              *intersection = MemRegion(start, intersection->end());
            }
            if (intersection->end() < end) {
              *intersection = MemRegion(intersection->start(), end);
            }
            *invalid_region = MemRegion(start, end);
        }
}

void MutableNUMASpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space,
                                  bool setup_pages) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  // Must always clear the space
  clear(SpaceDecorator::DontMangle);

  // Compute chunk sizes
  size_t prev_page_size = page_size();
  set_page_size(UseLargePages ? alignment() : os::vm_page_size());
  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
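  // Illustrative example (figures not from the code): a 512M space with a 4K
  // page size gives base_space_size_pages == 131072; divided equally over 4
  // lgroups each chunk would start out at 32768 pages.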

  // Try small pages if the chunk size is too small
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    set_page_size(os::vm_page_size());
    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL   ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
  }

  // Check if the space layout has changed significantly?
  // This happens when the space has been resized so that either head or tail
  // chunk became less than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing          &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();


  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing                                ||
          (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
           samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else
        if (!layout_valid || NUMASpaceResizeRate == 0) {
          // Fast adaptation. If no space resize rate is set, resize
          // the chunks instantly.
          chunk_byte_size = adaptive_chunk_size(i, 0);
        } else {
          // Slow adaptation. Resize the chunks moving no more than
          // NUMASpaceResizeRate bytes per collection.
          size_t limit = NUMASpaceResizeRate /
                         (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
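          // The divisor is the triangular number n*(n+1)/2, so the per-chunk
          // budgets limit*(i+1) used below sum to roughly NUMASpaceResizeRate
          // across all chunk boundaries.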
          chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
        }

      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
    }

    if (i == 0) { // Bottom chunk
      if (i != lgrp_spaces()->length() - 1) {
        new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
      } else {
        new_region = MemRegion(bottom(), end());
      }
    } else
      if (i < lgrp_spaces()->length() - 1) { // Middle chunks
        MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
        new_region = MemRegion(ps->end(),
                               ps->end() + (chunk_byte_size >> LogHeapWordSize));
      } else { // Top chunk
        MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
        new_region = MemRegion(ps->end(), end());
      }
    guarantee(region().contains(new_region), "Region invariant");


    // The general case:
    // |---------------------|--invalid---|--------------------------|
    // |------------------new_region---------------------------------|
    // |----bottom_region--|---intersection---|------top_region------|
    //                     |----old_region----|
    // The intersection part has all pages in place; we don't need to migrate them.
    // Pages for the top and bottom part should be freed and then reallocated.

    MemRegion intersection = old_region.intersection(new_region);

    if (intersection.start() == NULL || intersection.end() == NULL) {
      intersection = MemRegion(new_region.start(), new_region.start());
    }

    if (!os::numa_has_static_binding()) {
      MemRegion invalid_region = ls->invalid_region().intersection(new_region);
      // The invalid region is a range of memory that could have been
      // allocated on another node. That's relevant only on Solaris where
      // there is no static memory binding.
      if (!invalid_region.is_empty()) {
        merge_regions(new_region, &intersection, &invalid_region);
        free_region(invalid_region);
        ls->set_invalid_region(MemRegion());
      }
    }

    select_tails(new_region, intersection, &bottom_region, &top_region);

    if (!os::numa_has_static_binding()) {
      // If that's a system with the first-touch policy then it's enough
      // to free the pages.
      free_region(bottom_region);
      free_region(top_region);
    } else {
      // In a system with static binding we have to change the bias whenever
      // we reshape the heap.
      bias_region(bottom_region, ls->lgrp_id());
      bias_region(top_region, ls->lgrp_id());
    }

    // Clear space (set top = bottom) but never mangle.
    s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
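    // MutableSpace::DontSetupPages: the pages of the new region have already
    // been freed or biased above, so the generic page setup must not touch
    // them again and disturb the NUMA placement.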

    set_adaptation_cycles(samples_count());
  }
}

// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length();) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
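    // 'top' is the chunk's current top rounded down to a page boundary (but
    // never below bottom()); it is used as the start of any invalid region
    // recorded for this chunk below.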

    if (s->contains(value)) {
      // Check if setting the chunk's top to a given value would create a hole less than
      // a minimal object; assuming that's not the last chunk in which case we don't care.
      if (i < lgrp_spaces()->length() - 1) {
        size_t remainder = pointer_delta(s->end(), value);
        const size_t min_fill_size = CollectedHeap::min_fill_size();
        if (remainder < min_fill_size && remainder > 0) {
          // Add a minimum size filler object; it will cross the chunk boundary.
          CollectedHeap::fill_with_object(value, min_fill_size);
          value += min_fill_size;
          assert(!s->contains(value), "Should be in the next chunk");
          // Restart the loop from the same chunk, since the value has moved
          // to the next one.
          continue;
        }
      }

      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (!os::numa_has_static_binding() && top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
    i++;
  }
  MutableSpace::set_top(value);
}

void MutableNUMASpace::clear(bool mangle_space) {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    // Never mangle NUMA spaces because the mangling will
    // bind the memory to a possibly unwanted lgroup.
    lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
  }
}

/*
   Linux supports static memory binding, therefore most of the logic
   dealing with possible invalid page allocation is effectively disabled.
   Besides, there is no notion of a home node in Linux: a thread may
   migrate freely, although the scheduler is rather reluctant to move
   threads between nodes. We therefore check for the current node on
   every allocation, and with high probability a thread stays on the same
   node long enough to get local access to recently allocated objects.
 */

HeapWord* MutableNUMASpace::allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);

  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }

  LGRPSpace* ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->allocate(size);

  if (p != NULL) {
    size_t remainder = s->free_in_words();
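    // If the allocation would leave a tail smaller than the minimal object
    // size, the space could not be made parsable later, so undo the
    // allocation and fail instead.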
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      s->set_top(s->top() - size);
      p = NULL;
    }
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
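    // Touch one word per page so the OS commits the pages now, while the
    // current thread is (very likely) still running on the lgroup this chunk
    // is intended for (first-touch placement).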
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

// This version is lock-free.
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  LGRPSpace *ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL) {
    size_t remainder = pointer_delta(s->end(), p + size);
    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
      if (s->cas_deallocate(p, size)) {
        // We were the last to allocate and created a fragment less than
        // a minimal object.
        p = NULL;
      } else {
        guarantee(false, "Deallocation should always succeed");
      }
    }
  }
  if (p != NULL) {
    HeapWord* cur_top, *cur_chunk_top = p + size;
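    // Publish the new top with a CAS loop; another thread may already have
    // installed a higher value, in which case there is nothing left to do.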
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print("    lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        lgrp_spaces()->at(i)->accumulate_statistics(page_size());
      }
      st->print("    local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}

void MutableNUMASpace::verify(bool allow_dirty) {
  // This can be called after setting an arbitrary value to the space's top,
  // so an object can cross the chunk boundary. We ensure the parsability
  // of the space and just walk the objects in linear fashion.
  ensure_parsability();
  MutableSpace::verify(allow_dirty);
}

// Scan pages and gather stats about page placement and size.
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
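  // Only the page-aligned middle part of the space is scanned; the unaligned
  // head and tail are accounted for as unbiased space at the end.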
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));

}

// Scan page_count pages and verify if they have the right size and right placement.
// If invalid pages are found they are freed in the hope that subsequent reallocation
// will be more successful.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);
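  // Scan at most page_count pages per call, resuming where the previous call
  // stopped; last_page_scanned() keeps the cursor between calls.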

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}