src/hotspot/share/gc/shared/collectedHeap.hpp
branch epsilon-gc-branch
changeset 56276 ee5e58456be5
parent 55974 06122633fead
parent 49054 fa4c8865a4ff
child 56406 e629240491c7
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Thu Feb 08 21:05:35 2018 +0100
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Mon Mar 12 12:22:21 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,7 @@
 class GCMemoryManager;
 class MemoryPool;
 class MetaspaceSummary;
+class SoftRefPolicy;
 class Thread;
 class ThreadClosure;
 class VirtualSpaceSummary;
@@ -101,10 +102,6 @@
 
   GCHeapLog* _gc_heap_log;
 
-  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
-  // or INCLUDE_JVMCI is being used
-  bool _defer_initial_card_mark;
-
   MemRegion _reserved;
 
  protected:
@@ -129,13 +126,6 @@
   // Constructor
   CollectedHeap();
 
-  // Do common initializations that must follow instance construction,
-  // for example, those needing virtual calls.
-  // This code could perhaps be moved into initialize() but would
-  // be slightly more awkward because we want the latter to be a
-  // pure virtual.
-  void pre_initialize();
-
   // Create a new tlab. All TLAB allocations must go through this.
   virtual HeapWord* allocate_new_tlab(size_t size);
 
@@ -409,45 +399,6 @@
     return 0;
   }
 
-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint. If such permission
-  // is granted for this heap type, the compiler promises to call
-  // defer_store_barrier() below on any slow path allocation of
-  // a new object for which such initializing store barriers will
-  // have been elided.
-  virtual bool can_elide_tlab_store_barriers() const = 0;
-
-  // If a compiler is eliding store barriers for TLAB-allocated objects,
-  // there is probably a corresponding slow path which can produce
-  // an object allocated anywhere.  The compiler's runtime support
-  // promises to call this function on such a slow-path-allocated
-  // object before performing initializations that have elided
-  // store barriers. Returns new_obj, or maybe a safer copy thereof.
-  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
-
-  // Answers whether an initializing store to a new object currently
-  // allocated at the given address doesn't need a store
-  // barrier. Returns "true" if it doesn't need an initializing
-  // store barrier; answers "false" if it does.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
-
-  // If a compiler is eliding store barriers for TLAB-allocated objects,
-  // we will be informed of a slow-path allocation by a call
-  // to new_store_pre_barrier() above. Such a call precedes the
-  // initialization of the object itself, and no post-store-barriers will
-  // be issued. Some heap types require that the barrier strictly follows
-  // the initializing stores. (This is currently implemented by deferring the
-  // barrier until the next slow-path allocation or gc-related safepoint.)
-  // This interface answers whether a particular heap type needs the card
-  // mark to be thus strictly sequenced after the stores.
-  virtual bool card_mark_must_follow_store() const = 0;
-
-  // If the CollectedHeap was asked to defer a store barrier above,
-  // this informs it to flush such a deferred store barrier to the
-  // remembered set.
-  virtual void flush_deferred_store_barrier(JavaThread* thread);
-
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc".  This probably implies as full a collection as the
   // "CollectedHeap" supports.
@@ -462,6 +413,12 @@
   // the context of the vm thread.
   virtual void collect_as_vm_thread(GCCause::Cause cause);
 
+  // Attempt to satisfy a metadata allocation that has already failed,
+  // typically by collecting to reclaim metaspace before retrying.
+  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                                       size_t size,
+                                                       Metaspace::MetadataType mdtype);
+
   // Returns the barrier set for this heap
   BarrierSet* barrier_set() { return _barrier_set; }
   void set_barrier_set(BarrierSet* barrier_set);
@@ -489,6 +444,9 @@
   // Return the CollectorPolicy for the heap
   virtual CollectorPolicy* collector_policy() const = 0;
 
+  // Return the SoftRefPolicy for the heap.
+  virtual SoftRefPolicy* soft_ref_policy() = 0;
+
   virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
   virtual GrowableArray<MemoryPool*> memory_pools() = 0;
 
@@ -543,7 +501,7 @@
   void pre_full_gc_dump(GCTimer* timer);
   void post_full_gc_dump(GCTimer* timer);
 
-  VirtualSpaceSummary create_heap_space_summary();
+  virtual VirtualSpaceSummary create_heap_space_summary();
   GCHeapSummary create_heap_summary();
 
   MetaspaceSummary create_metaspace_summary();
@@ -650,20 +608,6 @@
     return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt);
   }
 #endif
-
- public:
-  // Copy the current allocation context statistics for the specified contexts.
-  // For each context in contexts, set the corresponding entries in the totals
-  // and accuracy arrays to the current values held by the statistics.  Each
-  // array should be of length len.
-  // Returns true if there are more stats available.
-  virtual bool copy_allocation_context_stats(const jint* contexts,
-                                             jlong* totals,
-                                             jbyte* accuracy,
-                                             jint len) {
-    return false;
-  }
-
 };
 
 // Class to set and reset the GC cause for a CollectedHeap.
@@ -673,16 +617,12 @@
   GCCause::Cause _previous_cause;
  public:
   GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "This method manipulates heap state without locking");
     _heap = heap;
     _previous_cause = _heap->gc_cause();
     _heap->set_gc_cause(cause);
   }
 
   ~GCCauseSetter() {
-    assert(SafepointSynchronize::is_at_safepoint(),
-          "This method manipulates heap state without locking");
     _heap->set_gc_cause(_previous_cause);
   }
 };
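
The new pure virtual soft_ref_policy() means every concrete heap must now hand out a SoftRefPolicy. Below is a minimal sketch of how a subclass might satisfy it, assuming a hypothetical MyHeap class and that gc/shared/softRefPolicy.hpp declares the SoftRefPolicy forward-declared above (neither MyHeap nor the by-value policy member is part of this changeset):

#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/softRefPolicy.hpp"   // assumed header for SoftRefPolicy

// Illustrative subclass, not code from this changeset.
class MyHeap : public CollectedHeap {
  SoftRefPolicy _soft_ref_policy;        // owned by value; lives as long as the heap
 public:
  virtual SoftRefPolicy* soft_ref_policy() {
    return &_soft_ref_policy;            // stable pointer for the VM to query
  }
  // ... the remaining CollectedHeap pure virtuals are elided, so this
  // sketch stays abstract; a real heap must implement them all ...
};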
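
With the safepoint asserts removed, GCCauseSetter is an ordinary scoped (RAII) guard: its constructor records the requested cause and its destructor restores the previous one on any exit path. A hedged usage sketch follows; collect_for_cause is a made-up helper, and do_full_collection(bool) is a CollectedHeap virtual outside the hunks shown here:

// Illustrative helper, not part of this changeset.
void collect_for_cause(CollectedHeap* heap, GCCause::Cause cause) {
  GCCauseSetter gcs(heap, cause);    // heap->gc_cause() now reports 'cause'
  heap->do_full_collection(false /* clear_all_soft_refs */);
}                                    // 'gcs' restores the previous cause here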