8033552: Fix missing volatile specifiers in CAS operations in GC code
author eosterlund
Tue, 20 Sep 2016 15:42:17 -0400
changeset 41283 2615c024f3eb
parent 41282 474076f73ba1
child 41286 802f69f05345
8033552: Fix missing volatile specifiers in CAS operations in GC code Summary: Add missing volatile specifiers. Reviewed-by: kbarrett, tschatzl
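The change declares fields that are concurrently read and CAS-updated (the CMS global finger, MutableSpace::_top, the PRT and SparsePRT free-list heads) as volatile, and threads that qualifier through the accessors and the vmStructs/JVMCI declarations. The following is a minimal standalone sketch of the pattern being enforced, not the HotSpot sources: HeapWord is a plain char* stand-in, MarkingTask and advance_finger are invented names, and the GCC __sync builtin stands in for Atomic::cmpxchg_ptr.

// Sketch only: a shared field that other threads read while it is CAS-updated
// is declared volatile so the retry loop always re-reads it from memory.
#include <cstddef>

typedef char* HeapWord;                    // stand-in for HotSpot's HeapWord*

class MarkingTask {
  HeapWord volatile _global_finger;        // shared, CAS-updated marking finger

 public:
  explicit MarkingTask(HeapWord start) : _global_finger(start) {}

  // Callers take the address with the volatile qualifier preserved, as the
  // patched top_addr()/global_finger_addr() accessors now do.
  HeapWord volatile* global_finger_addr() { return &_global_finger; }

  // Advance the finger monotonically; on contention, re-read and retry.
  void advance_finger(HeapWord new_finger) {
    HeapWord cur = _global_finger;
    while (new_finger > cur) {
      HeapWord prev = __sync_val_compare_and_swap(&_global_finger, cur, new_finger);
      if (prev == cur) return;             // CAS succeeded
      cur = prev;                           // another thread advanced it; retry
    }
  }
};

int main() {
  char heap[64];
  MarkingTask task(heap);
  task.advance_finger(heap + 32);           // single-threaded demo of the loop
  return *task.global_finger_addr() == heap + 32 ? 0 : 1;
}

Without the volatile qualifier on the field (and on the pointer returned by the accessor), the compiler is free to cache the finger in a register across iterations, which is exactly the hazard the diff below closes for the real fields.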
hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp
hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp
hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp
hotspot/src/share/vm/gc/g1/sparsePRT.cpp
hotspot/src/share/vm/gc/g1/sparsePRT.hpp
hotspot/src/share/vm/gc/parallel/mutableSpace.hpp
hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.hpp
hotspot/src/share/vm/gc/parallel/psYoungGen.hpp
hotspot/src/share/vm/gc/parallel/vmStructs_parallelgc.hpp
hotspot/src/share/vm/gc/serial/defNewGeneration.cpp
hotspot/src/share/vm/gc/serial/defNewGeneration.hpp
hotspot/src/share/vm/gc/shared/collectedHeap.hpp
hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp
hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp
hotspot/src/share/vm/gc/shared/generation.hpp
hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp
hotspot/src/share/vm/jvmci/jvmciCompilerToVM.hpp
hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp
hotspot/src/share/vm/runtime/vmStructs.cpp
--- a/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -258,16 +258,15 @@
 // the closure ParMarkFromRootsClosure.
 class ParPushOrMarkClosure: public MetadataAwareOopClosure {
  private:
-  CMSCollector*    _collector;
-  MemRegion        _whole_span;
-  MemRegion        _span;        // local chunk
-  CMSBitMap*       _bit_map;
-  OopTaskQueue*    _work_queue;
-  CMSMarkStack*    _overflow_stack;
-  HeapWord*  const _finger;
-  HeapWord** const _global_finger_addr;
-  ParMarkFromRootsClosure* const
-                   _parent;
+  CMSCollector*                  _collector;
+  MemRegion                      _whole_span;
+  MemRegion                      _span;       // local chunk
+  CMSBitMap*                     _bit_map;
+  OopTaskQueue*                  _work_queue;
+  CMSMarkStack*                  _overflow_stack;
+  HeapWord*  const               _finger;
+  HeapWord* volatile* const      _global_finger_addr;
+  ParMarkFromRootsClosure* const _parent;
  protected:
   DO_OOP_WORK_DEFN
  public:
@@ -277,7 +276,7 @@
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        HeapWord* finger,
-                       HeapWord** global_finger_addr,
+                       HeapWord* volatile* global_finger_addr,
                        ParMarkFromRootsClosure* parent);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Sep 20 15:42:17 2016 -0400
@@ -3025,14 +3025,14 @@
 
 // MT Concurrent Marking Task
 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
-  CMSCollector* _collector;
-  uint          _n_workers;       // requested/desired # workers
-  bool          _result;
-  CompactibleFreeListSpace*  _cms_space;
-  char          _pad_front[64];   // padding to ...
-  HeapWord*     _global_finger;   // ... avoid sharing cache line
-  char          _pad_back[64];
-  HeapWord*     _restart_addr;
+  CMSCollector*             _collector;
+  uint                      _n_workers;      // requested/desired # workers
+  bool                      _result;
+  CompactibleFreeListSpace* _cms_space;
+  char                      _pad_front[64];   // padding to ...
+  HeapWord* volatile        _global_finger;   // ... avoid sharing cache line
+  char                      _pad_back[64];
+  HeapWord*                 _restart_addr;
 
   //  Exposed here for yielding support
   Mutex* const _bit_map_lock;
@@ -3068,7 +3068,7 @@
 
   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
 
-  HeapWord** global_finger_addr() { return &_global_finger; }
+  HeapWord* volatile* global_finger_addr() { return &_global_finger; }
 
   CMSConcMarkingTerminator* terminator() { return &_term; }
 
@@ -6554,7 +6554,7 @@
 
   // Note: the local finger doesn't advance while we drain
   // the stack below, but the global finger sure can and will.
-  HeapWord** gfa = _task->global_finger_addr();
+  HeapWord* volatile* gfa = _task->global_finger_addr();
   ParPushOrMarkClosure pushOrMarkClosure(_collector,
                                          _span, _bit_map,
                                          _work_queue,
@@ -6721,7 +6721,7 @@
                                            OopTaskQueue* work_queue,
                                            CMSMarkStack*  overflow_stack,
                                            HeapWord* finger,
-                                           HeapWord** global_finger_addr,
+                                           HeapWord* volatile* global_finger_addr,
                                            ParMarkFromRootsClosure* parent) :
   MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -724,12 +724,12 @@
   // Support for parallelizing young gen rescan in CMS remark phase
   ParNewGeneration* _young_gen;
 
-  HeapWord** _top_addr;    // ... Top of Eden
-  HeapWord** _end_addr;    // ... End of Eden
-  Mutex*     _eden_chunk_lock;
-  HeapWord** _eden_chunk_array; // ... Eden partitioning array
-  size_t     _eden_chunk_index; // ... top (exclusive) of array
-  size_t     _eden_chunk_capacity;  // ... max entries in array
+  HeapWord* volatile* _top_addr;    // ... Top of Eden
+  HeapWord**          _end_addr;    // ... End of Eden
+  Mutex*              _eden_chunk_lock;
+  HeapWord**          _eden_chunk_array; // ... Eden partitioning array
+  size_t              _eden_chunk_index; // ... top (exclusive) of array
+  size_t              _eden_chunk_capacity;  // ... max entries in array
 
   // Support for parallelizing survivor space rescan
   HeapWord** _survivor_chunk_array;
--- a/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/g1/heapRegionRemSet.cpp	Tue Sep 20 15:42:17 2016 -0400
@@ -56,7 +56,7 @@
   PerRegionTable * _collision_list_next;
 
   // Global free list of PRTs
-  static PerRegionTable* _free_list;
+  static PerRegionTable* volatile _free_list;
 
 protected:
   // We need access in order to union things into the base table.
@@ -249,7 +249,7 @@
   static void test_fl_mem_size();
 };
 
-PerRegionTable* PerRegionTable::_free_list = NULL;
+PerRegionTable* volatile PerRegionTable::_free_list = NULL;
 
 size_t OtherRegionsTable::_max_fine_entries = 0;
 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
--- a/hotspot/src/share/vm/gc/g1/sparsePRT.cpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/g1/sparsePRT.cpp	Tue Sep 20 15:42:17 2016 -0400
@@ -283,7 +283,7 @@
 
 // ----------------------------------------------------------------------
 
-SparsePRT* SparsePRT::_head_expanded_list = NULL;
+SparsePRT* volatile SparsePRT::_head_expanded_list = NULL;
 
 void SparsePRT::add_to_expanded_list(SparsePRT* sprt) {
   // We could expand multiple times in a pause -- only put on list once.
--- a/hotspot/src/share/vm/gc/g1/sparsePRT.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/g1/sparsePRT.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -250,7 +250,7 @@
 
   bool should_be_on_expanded_list();
 
-  static SparsePRT* _head_expanded_list;
+  static SparsePRT* volatile _head_expanded_list;
 
 public:
   SparsePRT(HeapRegion* hr);
--- a/hotspot/src/share/vm/gc/parallel/mutableSpace.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/parallel/mutableSpace.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
   MemRegion _last_setup_region;
   size_t _alignment;
  protected:
-  HeapWord* _top;
+  HeapWord* volatile _top;
 
   MutableSpaceMangler* mangler() { return _mangler; }
 
@@ -69,7 +69,7 @@
   HeapWord* top() const                    { return _top;    }
   virtual void set_top(HeapWord* value)    { _top = value;   }
 
-  HeapWord** top_addr()                    { return &_top; }
+  HeapWord* volatile* top_addr()           { return &_top; }
   HeapWord** end_addr()                    { return &_end; }
 
   virtual void set_bottom(HeapWord* value) { _bottom = value; }
--- a/hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -175,7 +175,7 @@
 
   bool supports_inline_contig_alloc() const { return !UseNUMA; }
 
-  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
+  HeapWord* volatile* top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord* volatile*)-1; }
   HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
 
   void ensure_parsability(bool retire_tlabs);
--- a/hotspot/src/share/vm/gc/parallel/psYoungGen.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/parallel/psYoungGen.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -162,7 +162,7 @@
     return result;
   }
 
-  HeapWord** top_addr() const   { return eden_space()->top_addr(); }
+  HeapWord* volatile* top_addr() const   { return eden_space()->top_addr(); }
   HeapWord** end_addr() const   { return eden_space()->end_addr(); }
 
   // Iteration.
--- a/hotspot/src/share/vm/gc/parallel/vmStructs_parallelgc.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/parallel/vmStructs_parallelgc.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,8 @@
 #define SHARE_VM_GC_PARALLEL_VMSTRUCTS_PARALLELGC_HPP
 
 #define VM_STRUCTS_PARALLELGC(nonstatic_field, \
-                   static_field) \
+                              volatile_nonstatic_field, \
+                              static_field) \
                                                                                                                                      \
   /**********************/                                                                                                           \
   /* Parallel GC fields */                                                                                                           \
@@ -40,7 +41,7 @@
   nonstatic_field(ImmutableSpace,              _bottom,                                       HeapWord*)                             \
   nonstatic_field(ImmutableSpace,              _end,                                          HeapWord*)                             \
                                                                                                                                      \
-  nonstatic_field(MutableSpace,                _top,                                          HeapWord*)                             \
+  volatile_nonstatic_field(MutableSpace,       _top,                                          HeapWord*)                             \
                                                                                                                                      \
   nonstatic_field(PSYoungGen,                  _reserved,                                     MemRegion)                             \
   nonstatic_field(PSYoungGen,                  _virtual_space,                                PSVirtualSpace*)                       \
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Tue Sep 20 15:42:17 2016 -0400
@@ -512,7 +512,7 @@
 }
 
 
-HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
+HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
 
 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -225,7 +225,7 @@
   size_t max_survivor_size() const          { return _max_survivor_size; }
 
   bool supports_inline_contig_alloc() const { return true; }
-  HeapWord** top_addr() const;
+  HeapWord* volatile* top_addr() const;
   HeapWord** end_addr() const;
 
   // Thread-local allocation buffers
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -350,7 +350,7 @@
   // These functions return the addresses of the fields that define the
   // boundaries of the contiguous allocation area.  (These fields should be
   // physically near to one another.)
-  virtual HeapWord** top_addr() const {
+  virtual HeapWord* volatile* top_addr() const {
     guarantee(false, "inline contiguous allocation not supported");
     return NULL;
   }
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Tue Sep 20 15:42:17 2016 -0400
@@ -721,7 +721,7 @@
   return _young_gen->supports_inline_contig_alloc();
 }
 
-HeapWord** GenCollectedHeap::top_addr() const {
+HeapWord* volatile* GenCollectedHeap::top_addr() const {
   return _young_gen->top_addr();
 }
 
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -184,7 +184,7 @@
   // We may support a shared contiguous allocation area, if the youngest
   // generation does.
   bool supports_inline_contig_alloc() const;
-  HeapWord** top_addr() const;
+  HeapWord* volatile* top_addr() const;
   HeapWord** end_addr() const;
 
   // Perform a full collection of the heap; intended for use in implementing
--- a/hotspot/src/share/vm/gc/shared/generation.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/gc/shared/generation.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -263,7 +263,7 @@
   // These functions return the addresses of the fields that define the
   // boundaries of the contiguous allocation area.  (These fields should be
   // physically near to one another.)
-  virtual HeapWord** top_addr() const { return NULL; }
+  virtual HeapWord* volatile* top_addr() const { return NULL; }
   virtual HeapWord** end_addr() const { return NULL; }
 
   // Thread-local allocation buffers
--- a/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.cpp	Tue Sep 20 15:42:17 2016 -0400
@@ -112,7 +112,7 @@
 
 bool       CompilerToVM::Data::_supports_inline_contig_alloc;
 HeapWord** CompilerToVM::Data::_heap_end_addr;
-HeapWord** CompilerToVM::Data::_heap_top_addr;
+HeapWord* volatile* CompilerToVM::Data::_heap_top_addr;
 int CompilerToVM::Data::_max_oop_map_stack_offset;
 
 jbyte* CompilerToVM::Data::cardtable_start_address;
@@ -153,7 +153,7 @@
 
   _supports_inline_contig_alloc = Universe::heap()->supports_inline_contig_alloc();
   _heap_end_addr = _supports_inline_contig_alloc ? Universe::heap()->end_addr() : (HeapWord**) -1;
-  _heap_top_addr = _supports_inline_contig_alloc ? Universe::heap()->top_addr() : (HeapWord**) -1;
+  _heap_top_addr = _supports_inline_contig_alloc ? Universe::heap()->top_addr() : (HeapWord* volatile*) -1;
 
   _max_oop_map_stack_offset = (OopMapValue::register_mask - VMRegImpl::stack2reg(0)->value()) * VMRegImpl::stack_slot_size;
   int max_oop_map_stack_index = _max_oop_map_stack_offset / VMRegImpl::stack_slot_size;
@@ -1604,4 +1604,3 @@
 int CompilerToVM::methods_count() {
   return sizeof(methods) / sizeof(JNINativeMethod);
 }
-
--- a/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.hpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/jvmci/jvmciCompilerToVM.hpp	Tue Sep 20 15:42:17 2016 -0400
@@ -58,7 +58,7 @@
 
     static bool _supports_inline_contig_alloc;
     static HeapWord** _heap_end_addr;
-    static HeapWord** _heap_top_addr;
+    static HeapWord* volatile* _heap_top_addr;
     static int _max_oop_map_stack_offset;
 
     static jbyte* cardtable_start_address;
--- a/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Tue Sep 20 15:42:17 2016 -0400
@@ -69,7 +69,7 @@
                                                                                                                                      \
   static_field(CompilerToVM::Data,             _supports_inline_contig_alloc,          bool)                                         \
   static_field(CompilerToVM::Data,             _heap_end_addr,                         HeapWord**)                                   \
-  static_field(CompilerToVM::Data,             _heap_top_addr,                         HeapWord**)                                   \
+  static_field(CompilerToVM::Data,             _heap_top_addr,                         HeapWord* volatile*)                          \
                                                                                                                                      \
   static_field(CompilerToVM::Data,             _max_oop_map_stack_offset,              int)                                          \
                                                                                                                                      \
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue Sep 20 20:22:19 2016 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue Sep 20 15:42:17 2016 -0400
@@ -2970,6 +2970,7 @@
 
 #if INCLUDE_ALL_GCS
   VM_STRUCTS_PARALLELGC(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
+                        GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
                         GENERATE_STATIC_VM_STRUCT_ENTRY)
 
   VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
@@ -2982,7 +2983,7 @@
 
 #if INCLUDE_TRACE
   VM_STRUCTS_TRACE(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
-                GENERATE_STATIC_VM_STRUCT_ENTRY)
+                   GENERATE_STATIC_VM_STRUCT_ENTRY)
 #endif
 
   VM_STRUCTS_EXT(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3168,11 +3169,12 @@
 
 #if INCLUDE_ALL_GCS
   VM_STRUCTS_PARALLELGC(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
-             CHECK_STATIC_VM_STRUCT_ENTRY);
+                        CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
+                        CHECK_STATIC_VM_STRUCT_ENTRY);
 
   VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
-             CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
-             CHECK_STATIC_VM_STRUCT_ENTRY);
+                 CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
+                 CHECK_STATIC_VM_STRUCT_ENTRY);
 
   VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
                 CHECK_STATIC_VM_STRUCT_ENTRY);
@@ -3181,7 +3183,7 @@
 
 #if INCLUDE_TRACE
   VM_STRUCTS_TRACE(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
-                CHECK_STATIC_VM_STRUCT_ENTRY);
+                   CHECK_STATIC_VM_STRUCT_ENTRY);
 #endif
 
   VM_STRUCTS_EXT(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3293,6 +3295,7 @@
                         CHECK_NO_OP));
 #if INCLUDE_ALL_GCS
   debug_only(VM_STRUCTS_PARALLELGC(ENSURE_FIELD_TYPE_PRESENT,
+                                   ENSURE_FIELD_TYPE_PRESENT,
                                    ENSURE_FIELD_TYPE_PRESENT));
   debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT,
                             ENSURE_FIELD_TYPE_PRESENT,