Merge

author:    jwilhelm
date:      Thu, 04 Jun 2015 14:19:51 +0200
changeset: 31235:b302e98fef45
parent:    31234:48000028382c
parent:    31033:5b6d0c94767a
child:     31236:d4d3011aa98e
--- a/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/cms/vmCMSOperations.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -254,9 +254,9 @@
   if (_gc_cause != GCCause::_gc_locker &&
       gch->total_full_collections_completed() <= _full_gc_count_before) {
     // maybe we should change the condition to test _gc_cause ==
-    // GCCause::_java_lang_system_gc, instead of
-    // _gc_cause != GCCause::_gc_locker
-    assert(_gc_cause == GCCause::_java_lang_system_gc,
+    // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
+    // instead of _gc_cause != GCCause::_gc_locker
+    assert(GCCause::is_user_requested_gc(_gc_cause),
            "the only way to get here if this was a System.gc()-induced GC");
     assert(ExplicitGCInvokesConcurrent, "Error");
     // Now, wait for witnessing concurrent gc cycle to complete,
--- a/hotspot/src/share/vm/gc/cms/yieldingWorkgroup.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/cms/yieldingWorkgroup.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -43,7 +43,7 @@
 }
 
 // Run a task; returns when the task is done, or the workers yield,
-// or the task is aborted, or the work gang is terminated via stop().
+// or the task is aborted.
 // A task that has been yielded can be continued via this interface
 // by using the same task repeatedly as the argument to the call.
 // It is expected that the YieldingFlexibleGangTask carries the appropriate
@@ -297,16 +297,9 @@
   WorkData data;
   int id;
   while (true) {
-    // Check if there is work to do or if we have been asked
-    // to terminate
+    // Check if there is work to do.
     gang()->internal_worker_poll(&data);
-    if (data.terminate()) {
-      // We have been asked to terminate.
-      assert(gang()->task() == NULL, "No task binding");
-      // set_status(TERMINATED);
-      return;
-    } else if (data.task() != NULL &&
-               data.sequence_number() != previous_sequence_number) {
+    if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
       // There is work to be done.
       // First check if we need to become active or if there
       // are already the requisite number of workers
--- a/hotspot/src/share/vm/gc/cms/yieldingWorkgroup.hpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/cms/yieldingWorkgroup.hpp	Thu Jun 04 14:19:51 2015 +0200
@@ -176,7 +176,7 @@
   GangWorker* allocate_worker(uint which);
 
   // Run a task; returns when the task is done, or the workers yield,
-  // or the task is aborted, or the work gang is terminated via stop().
+  // or the task is aborted.
   // A task that has been yielded can be continued via this same interface
   // by using the same task repeatedly as the argument to the call.
   // It is expected that the YieldingFlexibleGangTask carries the appropriate
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -1183,7 +1183,7 @@
     IsGCActiveMark x;
 
     // Timing
-    assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
+    assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
     {
@@ -2199,6 +2199,7 @@
   switch (cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
+    case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
     case GCCause::_update_allocation_context_stats_inc: return true;
     case GCCause::_wb_conc_mark:            return true;
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Jun 04 14:19:51 2015 +0200
@@ -324,7 +324,8 @@
   // explicitly started if:
   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
-  // (c) cause == _g1_humongous_allocation
+  // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
+  // (d) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
--- a/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/g1/vm_operations_g1.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -168,7 +168,7 @@
   // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
   // that just started (or maybe one that was already in progress) to
   // finish.
-  if (_gc_cause == GCCause::_java_lang_system_gc &&
+  if (GCCause::is_user_requested_gc(_gc_cause) &&
       _should_initiate_conc_mark) {
     assert(ExplicitGCInvokesConcurrent,
            "the only way to be here is if ExplicitGCInvokesConcurrent is set");
--- a/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -130,7 +130,7 @@
   // Update the pause time.
   _major_timer.stop();
 
-  if (gc_cause != GCCause::_java_lang_system_gc ||
+  if (!GCCause::is_user_requested_gc(gc_cause) ||
       UseAdaptiveSizePolicyWithSystemGC) {
     double major_pause_in_seconds = _major_timer.seconds();
     double major_pause_in_ms = major_pause_in_seconds * MILLIUNITS;
--- a/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/parallel/psMarkSweep.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -272,7 +272,7 @@
       // Don't check if the size_policy is ready here.  Let
       // the size_policy check that internally.
       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
-          ((gc_cause != GCCause::_java_lang_system_gc) ||
+          (!GCCause::is_user_requested_gc(gc_cause) ||
             UseAdaptiveSizePolicyWithSystemGC)) {
         // Swap the survivor spaces if from_space is empty. The
         // resize_young_gen() called below is normally used after
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -2053,7 +2053,7 @@
     marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
 
     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
-      && gc_cause == GCCause::_java_lang_system_gc;
+      && GCCause::is_user_requested_gc(gc_cause);
     summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
 
     COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
@@ -2089,7 +2089,7 @@
       // Don't check if the size_policy is ready here.  Let
       // the size_policy check that internally.
       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
-          ((gc_cause != GCCause::_java_lang_system_gc) ||
+          (!GCCause::is_user_requested_gc(gc_cause) ||
             UseAdaptiveSizePolicyWithSystemGC)) {
         // Swap the survivor spaces if from_space is empty. The
         // resize_young_gen() called below is normally used after
--- a/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -290,7 +290,7 @@
 
   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
 
-  if ((gc_cause != GCCause::_java_lang_system_gc) ||
+  if (!GCCause::is_user_requested_gc(gc_cause) ||
        UseAdaptiveSizePolicyWithSystemGC) {
     // Gather the feedback data for eden occupancy.
     young_gen->eden_space()->accumulate_statistics();
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -960,7 +960,7 @@
                             GCCause::to_string(gch->gc_cause()));
       }
       assert(gch->gc_cause() == GCCause::_scavenge_alot ||
-             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
+             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
              !gch->incremental_collection_failed(),
              "Twice in a row");
       seen_incremental_collection_failed = false;
--- a/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -244,7 +244,7 @@
   // Update the pause time.
   _minor_timer.stop();
 
-  if (gc_cause != GCCause::_java_lang_system_gc ||
+  if (!GCCause::is_user_requested_gc(gc_cause) ||
       UseAdaptiveSizePolicyWithSystemGC) {
     double minor_pause_in_seconds = _minor_timer.seconds();
     double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;
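Every size-policy call site touched above applies the same guard: adaptive-sizing statistics are skipped for user-requested collections (System.gc(), and now GC.run) unless UseAdaptiveSizePolicyWithSystemGC is set. Condensed into a hypothetical sketch, with a plain boolean standing in for the VM flag:

    // Hypothetical condensed form of the guard repeated at the call sites above.
    static bool UseAdaptiveSizePolicyWithSystemGC = false;

    bool should_update_size_policy_stats(bool is_user_requested_gc) {
      // User-requested GCs are excluded from the statistics unless the flag
      // explicitly opts them in.
      return !is_user_requested_gc || UseAdaptiveSizePolicyWithSystemGC;
    }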
--- a/hotspot/src/share/vm/gc/shared/gcCause.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/shared/gcCause.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -103,6 +103,9 @@
     case _last_ditch_collection:
       return "Last ditch collection";
 
+    case _dcmd_gc_run:
+      return "Diagnostic Command";
+
     case _last_gc_cause:
       return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";
 
--- a/hotspot/src/share/vm/gc/shared/gcCause.hpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/shared/gcCause.hpp	Thu Jun 04 14:19:51 2015 +0200
@@ -74,12 +74,15 @@
     _g1_humongous_allocation,
 
     _last_ditch_collection,
+
+    _dcmd_gc_run,
+
     _last_gc_cause
   };
 
   inline static bool is_user_requested_gc(GCCause::Cause cause) {
     return (cause == GCCause::_java_lang_system_gc ||
-            cause == GCCause::_jvmti_force_gc);
+            cause == GCCause::_dcmd_gc_run);
   }
 
   inline static bool is_serviceability_requested_gc(GCCause::Cause
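The header change above is the heart of the patch: _dcmd_gc_run becomes a first-class cause, and is_user_requested_gc() now covers System.gc() and the GC.run diagnostic command, while _jvmti_force_gc no longer counts. A minimal standalone model of the patched predicate, reduced to the causes that matter here (this is an illustrative sketch, not the HotSpot header):

    #include <cassert>

    // Simplified model of the patched GCCause (illustrative subset of the enum).
    struct GCCause {
      enum Cause {
        _java_lang_system_gc,
        _jvmti_force_gc,
        _dcmd_gc_run,        // new: GC triggered via the GC.run diagnostic command
        _last_gc_cause
      };

      inline static bool is_user_requested_gc(Cause cause) {
        return (cause == _java_lang_system_gc ||
                cause == _dcmd_gc_run);      // _jvmti_force_gc no longer qualifies
      }
    };

    int main() {
      assert(GCCause::is_user_requested_gc(GCCause::_dcmd_gc_run));
      assert(!GCCause::is_user_requested_gc(GCCause::_jvmti_force_gc));
      return 0;
    }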
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -304,9 +304,16 @@
 }
 
 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  return UseConcMarkSweepGC &&
-         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+  if (!UseConcMarkSweepGC) {
+    return false;
+  }
+
+  switch (cause) {
+    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:
+    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
+    default:                            return false;
+  }
 }
 
 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
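GenCollectedHeap now makes the same decision as G1 (see g1CollectedHeap.cpp above) with a switch, so _dcmd_gc_run shares a case with _java_lang_system_gc and therefore honors ExplicitGCInvokesConcurrent. A hypothetical, self-contained sketch of that dispatch, with plain booleans standing in for the real VM flags:

    // Illustrative model of should_do_concurrent_full_gc(); flags are stand-ins.
    enum Cause { _gc_locker, _java_lang_system_gc, _dcmd_gc_run, _allocation_failure };

    static bool UseConcMarkSweepGC          = true;
    static bool GCLockerInvokesConcurrent   = false;
    static bool ExplicitGCInvokesConcurrent = true;

    static bool should_do_concurrent_full_gc(Cause cause) {
      if (!UseConcMarkSweepGC) {
        return false;
      }
      switch (cause) {
        case _gc_locker:            return GCLockerInvokesConcurrent;
        case _java_lang_system_gc:  // fall through: GC.run behaves like System.gc()
        case _dcmd_gc_run:          return ExplicitGCInvokesConcurrent;
        default:                    return false;
      }
    }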
--- a/hotspot/src/share/vm/gc/shared/workgroup.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/shared/workgroup.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -47,7 +47,6 @@
                          /* allow_vm_block */ are_GC_task_threads,
                                               Monitor::_safepoint_check_sometimes);
   assert(monitor() != NULL, "Failed to allocate monitor");
-  _terminate = false;
   _task = NULL;
   _sequence_number = 0;
   _started_workers = 0;
@@ -106,18 +105,6 @@
   return true;
 }
 
-AbstractWorkGang::~AbstractWorkGang() {
-  if (TraceWorkGang) {
-    tty->print_cr("Destructing work gang %s", name());
-  }
-  stop();   // stop all the workers
-  for (uint worker = 0; worker < total_workers(); worker += 1) {
-    delete gang_worker(worker);
-  }
-  delete gang_workers();
-  delete monitor();
-}
-
 GangWorker* AbstractWorkGang::gang_worker(uint i) const {
   // Array index bounds checking.
   GangWorker* result = NULL;
@@ -175,28 +162,9 @@
   WorkGang::run_task(task, (uint) active_workers());
 }
 
-void AbstractWorkGang::stop() {
-  // Tell all workers to terminate, then wait for them to become inactive.
-  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
-  if (TraceWorkGang) {
-    tty->print_cr("Stopping work gang %s task %s", name(), task()->name());
-  }
-  _task = NULL;
-  _terminate = true;
-  monitor()->notify_all();
-  while (finished_workers() < active_workers()) {
-    if (TraceWorkGang) {
-      tty->print_cr("Waiting in work gang %s: %u/%u finished",
-                    name(), finished_workers(), active_workers());
-    }
-    monitor()->wait(/* no_safepoint_check */ true);
-  }
-}
-
 void AbstractWorkGang::internal_worker_poll(WorkData* data) const {
   assert(monitor()->owned_by_self(), "worker_poll is an internal method");
   assert(data != NULL, "worker data is null");
-  data->set_terminate(terminate());
   data->set_task(task());
   data->set_sequence_number(sequence_number());
 }
@@ -259,7 +227,7 @@
 void GangWorker::loop() {
   int previous_sequence_number = 0;
   Monitor* gang_monitor = gang()->monitor();
-  for ( ; /* !terminate() */; ) {
+  for ( ; ; ) {
     WorkData data;
     int part;  // Initialized below.
     {
@@ -272,8 +240,6 @@
       if (TraceWorkGang) {
         tty->print("Polled outside for work in gang %s worker %u",
                    gang()->name(), id());
-        tty->print("  terminate: %s",
-                   data.terminate() ? "true" : "false");
         tty->print("  sequence: %d (prev: %d)",
                    data.sequence_number(), previous_sequence_number);
         if (data.task() != NULL) {
@@ -283,13 +249,7 @@
         }
         tty->cr();
       }
-      for ( ; /* break or return */; ) {
-        // Terminate if requested.
-        if (data.terminate()) {
-          gang()->internal_note_finish();
-          gang_monitor->notify_all();
-          return;
-        }
+      for ( ; /* break */; ) {
         // Check for new work.
         if ((data.task() != NULL) &&
             (data.sequence_number() != previous_sequence_number)) {
@@ -306,8 +266,6 @@
         if (TraceWorkGang) {
           tty->print("Polled inside for work in gang %s worker %u",
                      gang()->name(), id());
-          tty->print("  terminate: %s",
-                     data.terminate() ? "true" : "false");
           tty->print("  sequence: %d (prev: %d)",
                      data.sequence_number(), previous_sequence_number);
           if (data.task() != NULL) {
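With _terminate and stop() removed, a gang worker's loop reduces to: sleep on the gang monitor until a task with a new sequence number is published, run it, and go back to sleep; it never exits. Below is a minimal standalone model of that protocol using standard C++ primitives (hypothetical MiniGang type; completion bookkeeping such as internal_note_finish() is elided):

    #include <condition_variable>
    #include <functional>
    #include <mutex>

    // Simplified model of the post-change worker protocol: workers are never
    // told to terminate; they only wake when a new task is published.
    struct MiniGang {
      std::mutex              monitor;
      std::condition_variable cv;
      std::function<void()>   task;              // empty when the gang is idle
      int                     sequence_number = 0;

      // Publish a task for the workers (completion tracking is elided).
      void run_task(std::function<void()> t) {
        {
          std::lock_guard<std::mutex> lock(monitor);
          task = std::move(t);
          ++sequence_number;
        }
        cv.notify_all();
      }

      // What GangWorker::loop() reduces to once the terminate flag is gone.
      void worker_loop() {
        int previous_sequence_number = 0;
        while (true) {                           // no terminate check; never returns
          std::function<void()> work;
          {
            std::unique_lock<std::mutex> lock(monitor);
            cv.wait(lock, [&] {
              return task && sequence_number != previous_sequence_number;
            });
            work = task;                         // poll shared state under the lock
            previous_sequence_number = sequence_number;
          }
          work();                                // run the task outside the lock
        }
      }
    };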
--- a/hotspot/src/share/vm/gc/shared/workgroup.hpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/gc/shared/workgroup.hpp	Thu Jun 04 14:19:51 2015 +0200
@@ -103,16 +103,15 @@
 // An abstract class representing a gang of workers.
 // You subclass this to supply an implementation of run_task().
 class AbstractWorkGang: public CHeapObj<mtInternal> {
-  // Here's the public interface to this class.
+protected:
+  // Work gangs are never deleted, so no need to cleanup.
+  ~AbstractWorkGang() { ShouldNotReachHere(); }
 public:
-  // Constructor and destructor.
+  // Constructor.
   AbstractWorkGang(const char* name, bool are_GC_task_threads,
                    bool are_ConcurrentGC_threads);
-  ~AbstractWorkGang();
   // Run a task, returns when the task is done (or terminated).
   virtual void run_task(AbstractGangTask* task) = 0;
-  // Stop and terminate all workers.
-  virtual void stop();
   // Return true if more workers should be applied to the task.
   virtual bool needs_more_workers() const { return true; }
 public:
@@ -129,8 +128,6 @@
   Monitor*  _monitor;
   // The count of the number of workers in the gang.
   uint _total_workers;
-  // Whether the workers should terminate.
-  bool _terminate;
   // The array of worker threads for this gang.
   // This is only needed for cleaning up.
   GangWorker** _gang_workers;
@@ -153,9 +150,6 @@
   virtual uint active_workers() const {
     return _total_workers;
   }
-  bool terminate() const {
-    return _terminate;
-  }
   GangWorker** gang_workers() const {
     return _gang_workers;
   }
@@ -205,21 +199,16 @@
 class WorkData: public StackObj {
   // This would be a struct, but I want accessor methods.
 private:
-  bool              _terminate;
   AbstractGangTask* _task;
   int               _sequence_number;
 public:
   // Constructor and destructor
   WorkData() {
-    _terminate       = false;
     _task            = NULL;
     _sequence_number = 0;
   }
   ~WorkData() {
   }
-  // Accessors and modifiers
-  bool terminate()                       const { return _terminate;  }
-  void set_terminate(bool value)               { _terminate = value; }
   AbstractGangTask* task()               const { return _task; }
   void set_task(AbstractGangTask* value)       { _task = value; }
   int sequence_number()                  const { return _sequence_number; }
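The protected ShouldNotReachHere() destructor added above documents that work gangs are allocated once and live for the rest of the VM's lifetime. As a generic illustration (hypothetical class, not the HotSpot type), a protected destructor is enough to make accidental deletion a compile-time error:

    // Sketch of the "never deleted" idiom: only the class itself (or a
    // subclass) can destroy instances.
    class NeverDestroyed {
     protected:
      ~NeverDestroyed() {}               // the HotSpot version calls ShouldNotReachHere()
     public:
      NeverDestroyed() {}
      void run() {}
    };

    void use_never_destroyed() {
      NeverDestroyed* gang = new NeverDestroyed();  // allocated once, never freed
      gang->run();
      // delete gang;                    // compile error: destructor is protected
      // NeverDestroyed local;           // compile error for the same reason
    }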
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -614,8 +614,7 @@
   Metachunk* _chunks_in_use[NumberOfInUseLists];
   Metachunk* _current_chunk;
 
-  // Number of small chunks to allocate to a manager
-  // If class space manager, small chunks are unlimited
+  // Maximum number of small chunks to allocate to a SpaceManager
   static uint const _small_chunk_limit;
 
   // Sum of all space in allocated chunks
@@ -730,6 +729,8 @@
   // Block allocation and deallocation.
   // Allocates a block from the current chunk
   MetaWord* allocate(size_t word_size);
+  // Allocates a block from a small chunk
+  MetaWord* get_small_chunk_and_allocate(size_t word_size);
 
   // Helper for allocations
   MetaWord* allocate_work(size_t word_size);
@@ -2011,9 +2012,8 @@
 size_t SpaceManager::calc_chunk_size(size_t word_size) {
 
   // Decide between a small chunk and a medium chunk.  Up to
-  // _small_chunk_limit small chunks can be allocated but
-  // once a medium chunk has been allocated, no more small
-  // chunks will be allocated.
+  // _small_chunk_limit small chunks can be allocated.
+  // After that a medium chunk is preferred.
   size_t chunk_word_size;
   if (chunks_in_use(MediumIndex) == NULL &&
       sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) {
@@ -2081,7 +2081,7 @@
                             word_size, words_used, words_left);
   }
 
-  // Get another chunk out of the virtual space
+  // Get another chunk
   size_t grow_chunks_by_words = calc_chunk_size(word_size);
   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
 
@@ -2412,6 +2412,43 @@
   return next;
 }
 
+/*
+ * The policy is to allocate up to _small_chunk_limit small chunks
+ * after which only medium chunks are allocated.  This is done to
+ * reduce fragmentation.  In some cases, this can result in a lot
+ * of small chunks being allocated to the point where it's not
+ * possible to expand.  If this happens, there may be no medium chunks
+ * available and OOME would be thrown.  Instead of doing that,
+ * if the allocation request size fits in a small chunk, an attempt
+ * will be made to allocate a small chunk.
+ */
+MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
+  if (word_size + Metachunk::overhead() > small_chunk_size()) {
+    return NULL;
+  }
+
+  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
+  MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
+
+  Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
+
+  MetaWord* mem = NULL;
+
+  if (chunk != NULL) {
+    // Add chunk to the in-use chunk list and do an allocation from it.
+    // Add to this manager's list of chunks in use.
+    add_chunk(chunk, false);
+    mem = chunk->allocate(word_size);
+
+    inc_used_metrics(word_size);
+
+    // Track metaspace memory usage statistic.
+    track_metaspace_memory_usage();
+  }
+
+  return mem;
+}
+
 MetaWord* SpaceManager::allocate(size_t word_size) {
   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
 
@@ -3560,7 +3597,18 @@
   }
 
   if (result == NULL) {
-    report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
+    SpaceManager* sm;
+    if (is_class_space_allocation(mdtype)) {
+      sm = loader_data->metaspace_non_null()->class_vsm();
+    } else {
+      sm = loader_data->metaspace_non_null()->vsm();
+    }
+
+    result = sm->get_small_chunk_and_allocate(word_size);
+
+    if (result == NULL) {
+      report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
+    }
   }
 
   // Zero initialize.
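Taken together, the metaspace changes add one more step before an out-of-memory report: if the regular allocation fails and the request fits in a small chunk, try to satisfy it from a small chunk on the free list. A condensed, hypothetical sketch of that control flow (names and sizes are illustrative; the real code works in words under the SpaceManager and expand locks and pulls the chunk from the ChunkManager free list):

    #include <cstddef>

    const size_t small_chunk_words = 512;   // illustrative chunk size
    const size_t chunk_overhead    = 2;     // illustrative per-chunk overhead

    // Normal allocation path; the stub models the case where the current
    // chunks are full and the space cannot be expanded.
    void* allocate(size_t /*word_size*/) { return nullptr; }

    // Fallback: only worth trying if the request fits in a single small chunk.
    void* get_small_chunk_and_allocate(size_t word_size) {
      if (word_size + chunk_overhead > small_chunk_words) {
        return nullptr;
      }
      // ...take a small chunk from the free list and carve the block out of it...
      return nullptr;  // placeholder
    }

    void* allocate_metadata(size_t word_size) {
      void* result = allocate(word_size);
      if (result == nullptr) {
        // Expansion may be impossible, but a small chunk can still be available.
        result = get_small_chunk_and_allocate(word_size);
      }
      if (result == nullptr) {
        // report_metadata_oome(...) is reached only after the fallback fails.
      }
      return result;
    }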
--- a/hotspot/src/share/vm/services/diagnosticCommand.cpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/services/diagnosticCommand.cpp	Thu Jun 04 14:19:51 2015 +0200
@@ -315,7 +315,7 @@
 
 void SystemGCDCmd::execute(DCmdSource source, TRAPS) {
   if (!DisableExplicitGC) {
-    Universe::heap()->collect(GCCause::_java_lang_system_gc);
+    Universe::heap()->collect(GCCause::_dcmd_gc_run);
   } else {
     output()->print_cr("Explicit GC is disabled, no GC has been performed.");
   }
--- a/hotspot/src/share/vm/utilities/fakeRttiSupport.hpp	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/src/share/vm/utilities/fakeRttiSupport.hpp	Thu Jun 04 14:19:51 2015 +0200
@@ -89,11 +89,11 @@
     return ((uintx)1) << validate_tag(tag);
   }
 
-  static TagType validate_tag(uintx tag) {
-    // Type of tag is not TagType to dodge useless MacOSX compiler warning.
-    assert(tag < (sizeof(uintx) * BitsPerByte),
-           err_msg("Tag " UINTX_FORMAT " is too large", tag));
-    return static_cast<TagType>(tag);
+  static TagType validate_tag(TagType tag) {
+    assert(0 <= tag, err_msg("Tag " INTX_FORMAT " is negative", (intx)tag));
+    assert(tag < BitsPerWord,
+           err_msg("Tag " UINTX_FORMAT " is too large", (uintx)tag));
+    return tag;
   }
 };
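The rewritten validate_tag() takes TagType directly and asserts both bounds before the tag is used as a shift amount. A standalone version of the same guard (plain assert instead of HotSpot's assert/err_msg; BitsPerWordSketch stands in for BitsPerWord):

    #include <cassert>
    #include <cstdint>

    // A tag may only be turned into a one-bit mask if it is non-negative and
    // smaller than the word width.
    typedef int TagType;
    const int BitsPerWordSketch = static_cast<int>(sizeof(uintptr_t) * 8);

    static TagType validate_tag(TagType tag) {
      assert(0 <= tag && "tag is negative");
      assert(tag < BitsPerWordSketch && "tag is too large");
      return tag;
    }

    static uintptr_t tag_mask(TagType tag) {
      return static_cast<uintptr_t>(1) << validate_tag(tag);
    }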
 
--- a/hotspot/test/gc/TestSmallHeap.java	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/test/gc/TestSmallHeap.java	Thu Jun 04 14:19:51 2015 +0200
@@ -27,6 +27,7 @@
  * @requires vm.gc=="null"
  * @requires (vm.opt.AggressiveOpts=="null") | (vm.opt.AggressiveOpts=="false")
  * @requires vm.compMode != "Xcomp"
+ * @requires vm.opt.UseCompressedOops != false
  * @summary Verify that starting the VM with a small heap works
  * @library /testlibrary /../../test/lib
  * @modules java.management/sun.management
--- a/hotspot/test/serviceability/dcmd/gc/RunGCTest.java	Thu Jun 04 08:05:47 2015 -0400
+++ b/hotspot/test/serviceability/dcmd/gc/RunGCTest.java	Thu Jun 04 14:19:51 2015 +0200
@@ -59,7 +59,7 @@
         }
 
         OutputAnalyzer output = new OutputAnalyzer(gcLog, "");
-        output.shouldMatch(".*\\[Full GC \\(System(\\.gc\\(\\))?.*");
+        output.shouldContain("[Full GC (Diagnostic Command)");
     }
 
     @Test