8025856: Fix typos in the GC code
author jwilhelm
Thu, 23 Jan 2014 14:47:23 +0100
changeset 22551 9bf46d16dcc6
parent 22550 820966182ab9
child 22552 a29022212180
8025856: Fix typos in the GC code
Summary: Fix about 440 typos in comments in the VM code
Reviewed-by: mgerdin, tschatzl, coleenp, kmo, jcoomes
hotspot/src/share/vm/ci/ciField.cpp
hotspot/src/share/vm/ci/ciField.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
hotspot/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp
hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp
hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp
hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp
hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp
hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp
hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp
hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp
hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp
hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp
hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp
hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp
hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp
hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp
hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp
hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp
hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp
hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp
hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp
hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp
hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp
hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.hpp
hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
hotspot/src/share/vm/gc_interface/gcCause.hpp
hotspot/src/share/vm/memory/allocation.hpp
hotspot/src/share/vm/memory/barrierSet.hpp
hotspot/src/share/vm/memory/binaryTreeDictionary.cpp
hotspot/src/share/vm/memory/binaryTreeDictionary.hpp
hotspot/src/share/vm/memory/blockOffsetTable.cpp
hotspot/src/share/vm/memory/blockOffsetTable.hpp
hotspot/src/share/vm/memory/cardTableModRefBS.cpp
hotspot/src/share/vm/memory/cardTableRS.cpp
hotspot/src/share/vm/memory/collectorPolicy.cpp
hotspot/src/share/vm/memory/collectorPolicy.hpp
hotspot/src/share/vm/memory/genCollectedHeap.cpp
hotspot/src/share/vm/memory/genCollectedHeap.hpp
hotspot/src/share/vm/memory/genMarkSweep.cpp
hotspot/src/share/vm/memory/genRemSet.hpp
hotspot/src/share/vm/memory/generation.hpp
hotspot/src/share/vm/memory/heap.cpp
hotspot/src/share/vm/memory/heap.hpp
hotspot/src/share/vm/memory/heapInspection.hpp
hotspot/src/share/vm/memory/metaspace.cpp
hotspot/src/share/vm/memory/metaspaceShared.cpp
hotspot/src/share/vm/memory/modRefBarrierSet.hpp
hotspot/src/share/vm/memory/referenceProcessor.cpp
hotspot/src/share/vm/memory/referenceProcessor.hpp
hotspot/src/share/vm/memory/resourceArea.hpp
hotspot/src/share/vm/memory/sharedHeap.hpp
hotspot/src/share/vm/memory/space.cpp
hotspot/src/share/vm/memory/space.hpp
hotspot/src/share/vm/memory/tenuredGeneration.cpp
hotspot/src/share/vm/memory/universe.cpp
hotspot/src/share/vm/oops/method.hpp
hotspot/src/share/vm/opto/runtime.cpp
hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/arguments.hpp
hotspot/src/share/vm/runtime/compilationPolicy.cpp
hotspot/src/share/vm/runtime/compilationPolicy.hpp
hotspot/src/share/vm/runtime/deoptimization.cpp
hotspot/src/share/vm/runtime/deoptimization.hpp
hotspot/src/share/vm/runtime/frame.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/globals_extension.hpp
hotspot/src/share/vm/runtime/handles.hpp
hotspot/src/share/vm/runtime/javaCalls.cpp
hotspot/src/share/vm/runtime/jniHandles.cpp
hotspot/src/share/vm/runtime/jniHandles.hpp
hotspot/src/share/vm/runtime/mutex.cpp
hotspot/src/share/vm/runtime/mutex.hpp
hotspot/src/share/vm/runtime/mutexLocker.hpp
hotspot/src/share/vm/runtime/objectMonitor.cpp
hotspot/src/share/vm/runtime/objectMonitor.hpp
hotspot/src/share/vm/runtime/orderAccess.hpp
hotspot/src/share/vm/runtime/os.cpp
hotspot/src/share/vm/runtime/os.hpp
hotspot/src/share/vm/runtime/park.cpp
hotspot/src/share/vm/runtime/perfData.cpp
hotspot/src/share/vm/runtime/perfData.hpp
hotspot/src/share/vm/runtime/perfMemory.hpp
hotspot/src/share/vm/runtime/reflection.cpp
hotspot/src/share/vm/runtime/reflection.hpp
hotspot/src/share/vm/runtime/registerMap.hpp
hotspot/src/share/vm/runtime/relocator.cpp
hotspot/src/share/vm/runtime/safepoint.cpp
hotspot/src/share/vm/runtime/safepoint.hpp
hotspot/src/share/vm/runtime/sharedRuntime.cpp
hotspot/src/share/vm/runtime/sharedRuntime.hpp
hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp
hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp
hotspot/src/share/vm/runtime/signature.cpp
hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp
hotspot/src/share/vm/runtime/statSampler.cpp
hotspot/src/share/vm/runtime/stubCodeGenerator.hpp
hotspot/src/share/vm/runtime/synchronizer.cpp
hotspot/src/share/vm/runtime/synchronizer.hpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
hotspot/src/share/vm/runtime/unhandledOops.hpp
hotspot/src/share/vm/runtime/vframeArray.hpp
hotspot/src/share/vm/runtime/virtualspace.cpp
hotspot/src/share/vm/runtime/vm_operations.hpp
hotspot/src/share/vm/utilities/globalDefinitions.hpp
--- a/hotspot/src/share/vm/ci/ciField.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/ci/ciField.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -201,16 +201,10 @@
       return;
     }
 
-    // This field just may be constant.  The only cases where it will
-    // not be constant are:
-    //
-    // 1. The field holds a non-perm-space oop.  The field is, strictly
-    //    speaking, constant but we cannot embed non-perm-space oops into
-    //    generated code.  For the time being we need to consider the
-    //    field to be not constant.
-    // 2. The field is a *special* static&final field whose value
-    //    may change.  The three examples are java.lang.System.in,
-    //    java.lang.System.out, and java.lang.System.err.
+    // This field just may be constant.  The only case where it will
+    // not be constant is when the field is a *special* static&final field
+    // whose value may change.  The three examples are java.lang.System.in,
+    // java.lang.System.out, and java.lang.System.err.
 
     KlassHandle k = _holder->get_Klass();
     assert( SystemDictionary::System_klass() != NULL, "Check once per vm");
--- a/hotspot/src/share/vm/ci/ciField.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/ci/ciField.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -130,9 +130,7 @@
   //   1. The field is both static and final
   //   2. The canonical holder of the field has undergone
   //      static initialization.
-  //   3. If the field is an object or array, then the oop
-  //      in question is allocated in perm space.
-  //   4. The field is not one of the special static/final
+  //   3. The field is not one of the special static/final
   //      non-constant fields.  These are java.lang.System.in
   //      and java.lang.System.out.  Abomination.
   //
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -466,7 +466,7 @@
 void CMSAdaptiveSizePolicy::checkpoint_roots_final_begin() {
   _STW_timer.stop();
   _latest_cms_initial_mark_end_to_remark_start_secs = _STW_timer.seconds();
-  // Start accumumlating time for the remark in the STW timer.
+  // Start accumulating time for the remark in the STW timer.
   _STW_timer.reset();
   _STW_timer.start();
 }
@@ -537,8 +537,8 @@
       avg_msc_pause()->sample(msc_pause_in_seconds);
       double mutator_time_in_seconds = 0.0;
       if (_latest_cms_collection_end_to_collection_start_secs == 0.0) {
-        // This assertion may fail because of time stamp gradularity.
-        // Comment it out and investiage it at a later time.  The large
+        // This assertion may fail because of time stamp granularity.
+        // Comment it out and investigate it at a later time.  The large
         // time stamp granularity occurs on some older linux systems.
 #ifndef CLOCK_GRANULARITY_TOO_LARGE
         assert((_latest_cms_concurrent_marking_time_secs == 0.0) &&
@@ -836,7 +836,7 @@
 
 void CMSAdaptiveSizePolicy::ms_collection_marking_begin() {
   _STW_timer.stop();
-  // Start accumumlating time for the marking in the STW timer.
+  // Start accumulating time for the marking in the STW timer.
   _STW_timer.reset();
   _STW_timer.start();
 }
@@ -1227,7 +1227,7 @@
     // We use the tenuring threshold to equalize the cost of major
     // and minor collections.
     // ThresholdTolerance is used to indicate how sensitive the
-    // tenuring threshold is to differences in cost betweent the
+    // tenuring threshold is to differences in cost between the
     // collection types.
 
     // Get the times of interest. This involves a little work, so
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -356,7 +356,7 @@
   void concurrent_sweeping_begin();
   void concurrent_sweeping_end();
   // Similar to the above (e.g., concurrent_marking_end()) and
-  // is used for both the precleaning an abortable precleaing
+  // is used for both the precleaning an abortable precleaning
   // phases.
   void concurrent_precleaning_begin();
   void concurrent_precleaning_end();
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -88,8 +88,7 @@
   // of the tenured generation.
   PerfVariable* _avg_msc_pause_counter;
   // Average for the time between the most recent end of a
-  // MSC collection and the beginning of the next
-  // MSC collection.
+  // MSC collection and the beginning of the next MSC collection.
   PerfVariable* _avg_msc_interval_counter;
   // Average for the GC cost of a MSC collection based on
   // _avg_msc_pause_counter and _avg_msc_interval_counter.
@@ -99,8 +98,7 @@
   // of the tenured generation.
   PerfVariable* _avg_ms_pause_counter;
   // Average for the time between the most recent end of a
-  // MS collection and the beginning of the next
-  // MS collection.
+  // MS collection and the beginning of the next MS collection.
   PerfVariable* _avg_ms_interval_counter;
   // Average for the GC cost of a MS collection based on
   // _avg_ms_pause_counter and _avg_ms_interval_counter.
@@ -108,9 +106,9 @@
 
   // Average of the bytes promoted per minor collection.
   PerfVariable* _promoted_avg_counter;
-  // Average of the deviation of the promoted average
+  // Average of the deviation of the promoted average.
   PerfVariable* _promoted_avg_dev_counter;
-  // Padded average of the bytes promoted per minor colleciton
+  // Padded average of the bytes promoted per minor collection.
   PerfVariable* _promoted_padded_avg_counter;
 
   // See description of the _change_young_gen_for_maj_pauses
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -258,10 +258,10 @@
   bool take_from_overflow_list();
 };
 
-// Tn this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
+// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
 // stack and the bitMap are shared, so access needs to be suitably
-// sycnhronized. An OopTaskQueue structure, supporting efficient
-// workstealing, replaces a CMSMarkStack for storing grey objects.
+// synchronized. An OopTaskQueue structure, supporting efficient
+// work stealing, replaces a CMSMarkStack for storing grey objects.
 class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
  private:
   MemRegion              _span;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -407,8 +407,8 @@
   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                        (size_t) SmallForLinearAlloc - 1));
   // XXX the following could potentially be pretty slow;
-  // should one, pesimally for the rare cases when res
-  // caclulated above is less than IndexSetSize,
+  // should one, pessimistically for the rare cases when res
+  // calculated above is less than IndexSetSize,
   // just return res calculated above? My reasoning was that
   // those cases will be so rare that the extra time spent doesn't
   // really matter....
@@ -759,7 +759,7 @@
 // Note on locking for the space iteration functions:
 // since the collector's iteration activities are concurrent with
 // allocation activities by mutators, absent a suitable mutual exclusion
-// mechanism the iterators may go awry. For instace a block being iterated
+// mechanism the iterators may go awry. For instance a block being iterated
 // may suddenly be allocated or divided up and part of it allocated and
 // so on.
 
@@ -2090,7 +2090,7 @@
 
 // Support for concurrent collection policy decisions.
 bool CompactibleFreeListSpace::should_concurrent_collect() const {
-  // In the future we might want to add in frgamentation stats --
+  // In the future we might want to add in fragmentation stats --
   // including erosion of the "mountain" into this decision as well.
   return !adaptive_freelists() && linearAllocationWouldFail();
 }
@@ -2099,7 +2099,7 @@
 
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
-  // prepare_for_compaction() uses the space between live objects
+  // Prepare_for_compaction() uses the space between live objects
   // so that later phase can skip dead space quickly.  So verification
   // of the free lists doesn't work after.
 }
@@ -2122,7 +2122,7 @@
   SCAN_AND_COMPACT(obj_size);
 }
 
-// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
+// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
 // where fbs is free block sizes
 double CompactibleFreeListSpace::flsFrag() const {
   size_t itabFree = totalSizeInIndexedFreeLists();
@@ -2651,7 +2651,7 @@
   // changes on-the-fly during a scavenge and avoid such a phase-change
   // pothole. The following code is a heuristic attempt to do that.
   // It is protected by a product flag until we have gained
-  // enough experience with this heuristic and fine-tuned its behaviour.
+  // enough experience with this heuristic and fine-tuned its behavior.
   // WARNING: This might increase fragmentation if we overreact to
   // small spikes, so some kind of historical smoothing based on
   // previous experience with the greater reactivity might be useful.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -58,7 +58,7 @@
   HeapWord* _ptr;
   size_t    _word_size;
   size_t    _refillSize;
-  size_t    _allocation_size_limit;  // largest size that will be allocated
+  size_t    _allocation_size_limit;  // Largest size that will be allocated
 
   void print_on(outputStream* st) const;
 };
@@ -116,14 +116,14 @@
 
   PromotionInfo _promoInfo;
 
-  // helps to impose a global total order on freelistLock ranks;
+  // Helps to impose a global total order on freelistLock ranks;
   // assumes that CFLSpace's are allocated in global total order
   static int   _lockRank;
 
-  // a lock protecting the free lists and free blocks;
+  // A lock protecting the free lists and free blocks;
   // mutable because of ubiquity of locking even for otherwise const methods
   mutable Mutex _freelistLock;
-  // locking verifier convenience function
+  // Locking verifier convenience function
   void assert_locked() const PRODUCT_RETURN;
   void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
 
@@ -131,12 +131,13 @@
   LinearAllocBlock _smallLinearAllocBlock;
 
   FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
-  AFLBinaryTreeDictionary* _dictionary;    // ptr to dictionary for large size blocks
+  AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks
 
+  // Indexed array for small size blocks
   AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
-                                       // indexed array for small size blocks
-  // allocation stategy
-  bool       _fitStrategy;      // Use best fit strategy.
+
+  // Allocation strategy
+  bool       _fitStrategy;        // Use best fit strategy
   bool       _adaptive_freelists; // Use adaptive freelists
 
   // This is an address close to the largest free chunk in the heap.
@@ -157,7 +158,7 @@
 
   // Extra stuff to manage promotion parallelism.
 
-  // a lock protecting the dictionary during par promotion allocation.
+  // A lock protecting the dictionary during par promotion allocation.
   mutable Mutex _parDictionaryAllocLock;
   Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
 
@@ -275,26 +276,26 @@
   }
 
  protected:
-  // reset the indexed free list to its initial empty condition.
+  // Reset the indexed free list to its initial empty condition.
   void resetIndexedFreeListArray();
-  // reset to an initial state with a single free block described
+  // Reset to an initial state with a single free block described
   // by the MemRegion parameter.
   void reset(MemRegion mr);
   // Return the total number of words in the indexed free lists.
   size_t     totalSizeInIndexedFreeLists() const;
 
  public:
-  // Constructor...
+  // Constructor
   CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                            bool use_adaptive_freelists,
                            FreeBlockDictionary<FreeChunk>::DictionaryChoice);
-  // accessors
+  // Accessors
   bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
   FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
   HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
   void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
 
-  // Set CMS global values
+  // Set CMS global values.
   static void set_cms_values();
 
   // Return the free chunk at the end of the space.  If no such
@@ -305,7 +306,7 @@
 
   void set_collector(CMSCollector* collector) { _collector = collector; }
 
-  // Support for parallelization of rescan and marking
+  // Support for parallelization of rescan and marking.
   const size_t rescan_task_size()  const { return _rescan_task_size;  }
   const size_t marking_task_size() const { return _marking_task_size; }
   SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
@@ -346,7 +347,7 @@
   // Resizing support
   void set_end(HeapWord* value);  // override
 
-  // mutual exclusion support
+  // Mutual exclusion support
   Mutex* freelistLock() const { return &_freelistLock; }
 
   // Iteration support
@@ -370,7 +371,7 @@
   // If the iteration encounters an unparseable portion of the region,
   // terminate the iteration and return the address of the start of the
   // subregion that isn't done.  Return of "NULL" indicates that the
-  // interation completed.
+  // iteration completed.
   virtual HeapWord*
        object_iterate_careful_m(MemRegion mr,
                                 ObjectClosureCareful* cl);
@@ -393,11 +394,11 @@
   size_t block_size_nopar(const HeapWord* p) const;
   bool block_is_obj_nopar(const HeapWord* p) const;
 
-  // iteration support for promotion
+  // Iteration support for promotion
   void save_marks();
   bool no_allocs_since_save_marks();
 
-  // iteration support for sweeping
+  // Iteration support for sweeping
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
@@ -457,7 +458,7 @@
 
   FreeChunk* allocateScratch(size_t size);
 
-  // returns true if either the small or large linear allocation buffer is empty.
+  // Returns true if either the small or large linear allocation buffer is empty.
   bool       linearAllocationWouldFail() const;
 
   // Adjust the chunk for the minimum size.  This version is called in
@@ -477,18 +478,18 @@
   void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
               bool coalesced);
 
-  // Support for decisions regarding concurrent collection policy
+  // Support for decisions regarding concurrent collection policy.
   bool should_concurrent_collect() const;
 
-  // Support for compaction
+  // Support for compaction.
   void prepare_for_compaction(CompactPoint* cp);
   void adjust_pointers();
   void compact();
-  // reset the space to reflect the fact that a compaction of the
+  // Reset the space to reflect the fact that a compaction of the
   // space has been done.
   virtual void reset_after_compaction();
 
-  // Debugging support
+  // Debugging support.
   void print()                            const;
   void print_on(outputStream* st)         const;
   void prepare_for_verify();
@@ -500,7 +501,7 @@
   // i.e. either the binary tree dictionary, the indexed free lists
   // or the linear allocation block.
   bool verify_chunk_in_free_list(FreeChunk* fc) const;
-  // Verify that the given chunk is the linear allocation block
+  // Verify that the given chunk is the linear allocation block.
   bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
   // Do some basic checks on the the free lists.
   void check_free_list_consistency()      const PRODUCT_RETURN;
@@ -516,7 +517,7 @@
     size_t sumIndexedFreeListArrayReturnedBytes();
     // Return the total number of chunks in the indexed free lists.
     size_t totalCountInIndexedFreeLists() const;
-    // Return the total numberof chunks in the space.
+    // Return the total number of chunks in the space.
     size_t totalCount();
   )
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -117,10 +117,10 @@
 // hide the naked CGC_lock manipulation in the baton-passing code
 // further below. That's something we should try to do. Also, the proof
 // of correctness of this 2-level locking scheme is far from obvious,
-// and potentially quite slippery. We have an uneasy supsicion, for instance,
+// and potentially quite slippery. We have an uneasy suspicion, for instance,
 // that there may be a theoretical possibility of delay/starvation in the
 // low-level lock/wait/notify scheme used for the baton-passing because of
-// potential intereference with the priority scheme embodied in the
+// potential interference with the priority scheme embodied in the
 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 // invocation further below and marked with "XXX 20011219YSR".
 // Indeed, as we note elsewhere, this may become yet more slippery
@@ -259,7 +259,7 @@
   // Ideally, in the calculation below, we'd compute the dilatation
   // factor as: MinChunkSize/(promoting_gen's min object size)
   // Since we do not have such a general query interface for the
-  // promoting generation, we'll instead just use the mimimum
+  // promoting generation, we'll instead just use the minimum
   // object size (which today is a header's worth of space);
   // note that all arithmetic is in units of HeapWords.
   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
@@ -274,7 +274,7 @@
 //
 //   Let "f" be MinHeapFreeRatio in
 //
-//    _intiating_occupancy = 100-f +
+//    _initiating_occupancy = 100-f +
 //                           f * (CMSTriggerRatio/100)
 //   where CMSTriggerRatio is the argument "tr" below.
 //
@@ -2671,7 +2671,7 @@
 // that it's responsible for collecting, while itself doing any
 // work common to all generations it's responsible for. A similar
 // comment applies to the  gc_epilogue()'s.
-// The role of the varaible _between_prologue_and_epilogue is to
+// The role of the variable _between_prologue_and_epilogue is to
 // enforce the invocation protocol.
 void CMSCollector::gc_prologue(bool full) {
   // Call gc_prologue_work() for the CMSGen
@@ -2878,10 +2878,10 @@
 // Check reachability of the given heap address in CMS generation,
 // treating all other generations as roots.
 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
-  // We could "guarantee" below, rather than assert, but i'll
+  // We could "guarantee" below, rather than assert, but I'll
   // leave these as "asserts" so that an adventurous debugger
   // could try this in the product build provided some subset of
-  // the conditions were met, provided they were intersted in the
+  // the conditions were met, provided they were interested in the
   // results and knew that the computation below wouldn't interfere
   // with other concurrent computations mutating the structures
   // being read or written.
@@ -2982,7 +2982,7 @@
   // This is as intended, because by this time
   // GC must already have cleared any refs that need to be cleared,
   // and traced those that need to be marked; moreover,
-  // the marking done here is not going to intefere in any
+  // the marking done here is not going to interfere in any
   // way with the marking information used by GC.
   NoRefDiscovery no_discovery(ref_processor());
 
@@ -3000,7 +3000,7 @@
 
   if (CMSRemarkVerifyVariant == 1) {
     // In this first variant of verification, we complete
-    // all marking, then check if the new marks-verctor is
+    // all marking, then check if the new marks-vector is
     // a subset of the CMS marks-vector.
     verify_after_remark_work_1();
   } else if (CMSRemarkVerifyVariant == 2) {
@@ -3399,7 +3399,7 @@
       CMSExpansionCause::_allocate_par_lab);
     // Now go around the loop and try alloc again;
     // A competing par_promote might beat us to the expansion space,
-    // so we may go around the loop again if promotion fails agaion.
+    // so we may go around the loop again if promotion fails again.
     if (GCExpandToAllocateDelayMillis > 0) {
       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
     }
@@ -4370,7 +4370,7 @@
   // should really use wait/notify, which is the recommended
   // way of doing this type of interaction. Additionally, we should
   // consolidate the eight methods that do the yield operation and they
-  // are almost identical into one for better maintenability and
+  // are almost identical into one for better maintainability and
   // readability. See 6445193.
   //
   // Tony 2006.06.29
@@ -4538,7 +4538,7 @@
   // If Eden's current occupancy is below this threshold,
   // immediately schedule the remark; else preclean
   // past the next scavenge in an effort to
-  // schedule the pause as described avove. By choosing
+  // schedule the pause as described above. By choosing
   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
   // we will never do an actual abortable preclean cycle.
   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
@@ -5532,8 +5532,8 @@
   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
   // CAUTION: This closure has state that persists across calls to
   // the work method dirty_range_iterate_clear() in that it has
-  // imbedded in it a (subtype of) UpwardsObjectClosure. The
-  // use of that state in the imbedded UpwardsObjectClosure instance
+  // embedded in it a (subtype of) UpwardsObjectClosure. The
+  // use of that state in the embedded UpwardsObjectClosure instance
   // assumes that the cards are always iterated (even if in parallel
   // by several threads) in monotonically increasing order per each
   // thread. This is true of the implementation below which picks
@@ -5548,7 +5548,7 @@
   // sure that the changes there do not run counter to the
   // assumptions made here and necessary for correctness and
   // efficiency. Note also that this code might yield inefficient
-  // behaviour in the case of very large objects that span one or
+  // behavior in the case of very large objects that span one or
   // more work chunks. Such objects would potentially be scanned
   // several times redundantly. Work on 4756801 should try and
   // address that performance anomaly if at all possible. XXX
@@ -5574,7 +5574,7 @@
 
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
     // Having claimed the nth_task, compute corresponding mem-region,
-    // which is a-fortiori aligned correctly (i.e. at a MUT bopundary).
+    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
     // The alignment restriction ensures that we do not need any
     // synchronization with other gang-workers while setting or
     // clearing bits in thus chunk of the MUT.
@@ -6365,7 +6365,7 @@
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();
 
-  // We need to use a monotonically non-deccreasing time in ms
+  // We need to use a monotonically non-decreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@@ -6726,7 +6726,7 @@
     warning("CMS bit map allocation failure");
     return false;
   }
-  // For now we'll just commit all of the bit map up fromt.
+  // For now we'll just commit all of the bit map up front.
   // Later on we'll try to be more parsimonious with swap.
   if (!_virtual_space.initialize(brs, brs.size())) {
     warning("CMS bit map backing store failure");
@@ -6833,8 +6833,8 @@
 
 // XXX FIX ME !!! In the MT case we come in here holding a
 // leaf lock. For printing we need to take a further lock
-// which has lower rank. We need to recallibrate the two
-// lock-ranks involved in order to be able to rpint the
+// which has lower rank. We need to recalibrate the two
+// lock-ranks involved in order to be able to print the
 // messages below. (Or defer the printing to the caller.
 // For now we take the expedient path of just disabling the
 // messages for the problematic case.)
@@ -7174,7 +7174,7 @@
           }
         #endif // ASSERT
     } else {
-      // an unitialized object
+      // An uninitialized object.
       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
       size = pointer_delta(nextOneAddr + 1, addr);
@@ -7182,7 +7182,7 @@
              "alignment problem");
       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
       // will dirty the card when the klass pointer is installed in the
-      // object (signalling the completion of initialization).
+      // object (signaling the completion of initialization).
     }
   } else {
     // Either a not yet marked object or an uninitialized object
@@ -7993,7 +7993,7 @@
          // we need to dirty all of the cards that the object spans,
          // since the rescan of object arrays will be limited to the
          // dirty cards.
-         // Note that no one can be intefering with us in this action
+         // Note that no one can be interfering with us in this action
          // of dirtying the mod union table, so no locking or atomics
          // are required.
          if (obj->is_objArray()) {
@@ -9019,7 +9019,7 @@
 
 // It's OK to call this multi-threaded;  the worst thing
 // that can happen is that we'll get a bunch of closely
-// spaced simulated oveflows, but that's OK, in fact
+// spaced simulated overflows, but that's OK, in fact
 // probably good as it would exercise the overflow code
 // under contention.
 bool CMSCollector::simulate_overflow() {
@@ -9139,7 +9139,7 @@
       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
     }
   } else {
-    // Chop off the suffix and rerturn it to the global list.
+    // Chop off the suffix and return it to the global list.
     assert(cur->mark() != BUSY, "Error");
     oop suffix_head = cur->mark(); // suffix will be put back on global list
     cur->set_mark(NULL);           // break off suffix
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -171,19 +171,19 @@
 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
 class CMSMarkStack: public CHeapObj<mtGC>  {
   //
-  friend class CMSCollector;   // to get at expasion stats further below
+  friend class CMSCollector;   // To get at expansion stats further below.
   //
 
-  VirtualSpace _virtual_space;  // space for the stack
-  oop*   _base;      // bottom of stack
-  size_t _index;     // one more than last occupied index
-  size_t _capacity;  // max #elements
-  Mutex  _par_lock;  // an advisory lock used in case of parallel access
-  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run
+  VirtualSpace _virtual_space;  // Space for the stack
+  oop*   _base;      // Bottom of stack
+  size_t _index;     // One more than last occupied index
+  size_t _capacity;  // Max #elements
+  Mutex  _par_lock;  // An advisory lock used in case of parallel access
+  NOT_PRODUCT(size_t _max_depth;)  // Max depth plumbed during run
 
  protected:
-  size_t _hit_limit;      // we hit max stack size limit
-  size_t _failed_double;  // we failed expansion before hitting limit
+  size_t _hit_limit;      // We hit max stack size limit
+  size_t _failed_double;  // We failed expansion before hitting limit
 
  public:
   CMSMarkStack():
@@ -238,7 +238,7 @@
     _index = 0;
   }
 
-  // Expand the stack, typically in response to an overflow condition
+  // Expand the stack, typically in response to an overflow condition.
   void expand();
 
   // Compute the least valued stack element.
@@ -250,7 +250,7 @@
      return least;
   }
 
-  // Exposed here to allow stack expansion in || case
+  // Exposed here to allow stack expansion in || case.
   Mutex* par_lock() { return &_par_lock; }
 };
 
@@ -557,7 +557,7 @@
   // Manipulated with CAS in the parallel/multi-threaded case.
   oop _overflow_list;
   // The following array-pair keeps track of mark words
-  // displaced for accomodating overflow list above.
+  // displaced for accommodating overflow list above.
   // This code will likely be revisited under RFE#4922830.
   Stack<oop, mtGC>     _preserved_oop_stack;
   Stack<markOop, mtGC> _preserved_mark_stack;
@@ -599,7 +599,7 @@
   void verify_after_remark_work_1();
   void verify_after_remark_work_2();
 
-  // true if any verification flag is on.
+  // True if any verification flag is on.
   bool _verifying;
   bool verifying() const { return _verifying; }
   void set_verifying(bool v) { _verifying = v; }
@@ -611,9 +611,9 @@
   void set_did_compact(bool v);
 
   // XXX Move these to CMSStats ??? FIX ME !!!
-  elapsedTimer _inter_sweep_timer;   // time between sweeps
-  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
-  // padded decaying average estimates of the above
+  elapsedTimer _inter_sweep_timer;   // Time between sweeps
+  elapsedTimer _intra_sweep_timer;   // Time _in_ sweeps
+  // Padded decaying average estimates of the above
   AdaptivePaddedAverage _inter_sweep_estimate;
   AdaptivePaddedAverage _intra_sweep_estimate;
 
@@ -632,16 +632,16 @@
   void report_heap_summary(GCWhen::Type when);
 
  protected:
-  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
-  MemRegion                      _span;    // span covering above two
-  CardTableRS*                   _ct;      // card table
+  ConcurrentMarkSweepGeneration* _cmsGen;  // Old gen (CMS)
+  MemRegion                      _span;    // Span covering above two
+  CardTableRS*                   _ct;      // Card table
 
   // CMS marking support structures
   CMSBitMap     _markBitMap;
   CMSBitMap     _modUnionTable;
   CMSMarkStack  _markStack;
 
-  HeapWord*     _restart_addr; // in support of marking stack overflow
+  HeapWord*     _restart_addr; // In support of marking stack overflow
   void          lower_restart_addr(HeapWord* low);
 
   // Counters in support of marking stack / work queue overflow handling:
@@ -656,12 +656,12 @@
   size_t        _par_kac_ovflw;
   NOT_PRODUCT(ssize_t _num_par_pushes;)
 
-  // ("Weak") Reference processing support
+  // ("Weak") Reference processing support.
   ReferenceProcessor*            _ref_processor;
   CMSIsAliveClosure              _is_alive_closure;
-      // keep this textually after _markBitMap and _span; c'tor dependency
+  // Keep this textually after _markBitMap and _span; c'tor dependency.
 
-  ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
+  ConcurrentMarkSweepThread*     _cmsThread;   // The thread doing the work
   ModUnionClosure    _modUnionClosure;
   ModUnionClosurePar _modUnionClosurePar;
 
@@ -697,7 +697,7 @@
   // State related to prologue/epilogue invocation for my generations
   bool _between_prologue_and_epilogue;
 
-  // Signalling/State related to coordination between fore- and backgroud GC
+  // Signaling/State related to coordination between fore- and background GC
   // Note: When the baton has been passed from background GC to foreground GC,
   // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
   static bool _foregroundGCIsActive;    // true iff foreground collector is active or
@@ -712,13 +712,13 @@
   int    _numYields;
   size_t _numDirtyCards;
   size_t _sweep_count;
-  // number of full gc's since the last concurrent gc.
+  // Number of full gc's since the last concurrent gc.
   uint   _full_gcs_since_conc_gc;
 
-  // occupancy used for bootstrapping stats
+  // Occupancy used for bootstrapping stats
   double _bootstrap_occupancy;
 
-  // timer
+  // Timer
   elapsedTimer _timer;
 
   // Timing, allocation and promotion statistics, used for scheduling.
@@ -770,7 +770,7 @@
                                    int no_of_gc_threads);
   void push_on_overflow_list(oop p);
   void par_push_on_overflow_list(oop p);
-  // the following is, obviously, not, in general, "MT-stable"
+  // The following is, obviously, not, in general, "MT-stable"
   bool overflow_list_is_empty() const;
 
   void preserve_mark_if_necessary(oop p);
@@ -778,24 +778,24 @@
   void preserve_mark_work(oop p, markOop m);
   void restore_preserved_marks_if_any();
   NOT_PRODUCT(bool no_preserved_marks() const;)
-  // in support of testing overflow code
+  // In support of testing overflow code
   NOT_PRODUCT(int _overflow_counter;)
-  NOT_PRODUCT(bool simulate_overflow();)       // sequential
+  NOT_PRODUCT(bool simulate_overflow();)       // Sequential
   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
 
   // CMS work methods
-  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
+  void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
 
-  // a return value of false indicates failure due to stack overflow
-  bool markFromRootsWork(bool asynch);  // concurrent marking work
+  // A return value of false indicates failure due to stack overflow
+  bool markFromRootsWork(bool asynch);  // Concurrent marking work
 
  public:   // FIX ME!!! only for testing
-  bool do_marking_st(bool asynch);      // single-threaded marking
-  bool do_marking_mt(bool asynch);      // multi-threaded  marking
+  bool do_marking_st(bool asynch);      // Single-threaded marking
+  bool do_marking_mt(bool asynch);      // Multi-threaded  marking
 
  private:
 
-  // concurrent precleaning work
+  // Concurrent precleaning work
   size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
   size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
@@ -811,26 +811,26 @@
   // Resets (i.e. clears) the per-thread plab sample vectors
   void reset_survivor_plab_arrays();
 
-  // final (second) checkpoint work
+  // Final (second) checkpoint work
   void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                 bool init_mark_was_synchronous);
-  // work routine for parallel version of remark
+  // Work routine for parallel version of remark
   void do_remark_parallel();
-  // work routine for non-parallel version of remark
+  // Work routine for non-parallel version of remark
   void do_remark_non_parallel();
-  // reference processing work routine (during second checkpoint)
+  // Reference processing work routine (during second checkpoint)
   void refProcessingWork(bool asynch, bool clear_all_soft_refs);
 
-  // concurrent sweeping work
+  // Concurrent sweeping work
   void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
 
-  // (concurrent) resetting of support data structures
+  // (Concurrent) resetting of support data structures
   void reset(bool asynch);
 
   // Clear _expansion_cause fields of constituent generations
   void clear_expansion_cause();
 
-  // An auxilliary method used to record the ends of
+  // An auxiliary method used to record the ends of
   // used regions of each generation to limit the extent of sweep
   void save_sweep_limits();
 
@@ -854,7 +854,7 @@
   bool is_external_interruption();
   void report_concurrent_mode_interruption();
 
-  // If the backgrould GC is active, acquire control from the background
+  // If the background GC is active, acquire control from the background
   // GC and do the collection.
   void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
 
@@ -893,7 +893,7 @@
 
   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
 
-  // locking checks
+  // Locking checks
   NOT_PRODUCT(static bool have_cms_token();)
 
   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
@@ -958,7 +958,7 @@
   CMSBitMap* markBitMap()  { return &_markBitMap; }
   void directAllocated(HeapWord* start, size_t size);
 
-  // main CMS steps and related support
+  // Main CMS steps and related support
   void checkpointRootsInitial(bool asynch);
   bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                     // due to stack overflow
@@ -977,7 +977,7 @@
   // Performance Counter Support
   CollectorCounters* counters()    { return _gc_counters; }
 
-  // timer stuff
+  // Timer stuff
   void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
   void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
   void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
@@ -1014,18 +1014,18 @@
 
   static void print_on_error(outputStream* st);
 
-  // debugging
+  // Debugging
   void verify();
   bool verify_after_remark(bool silent = VerifySilently);
   void verify_ok_to_terminate() const PRODUCT_RETURN;
   void verify_work_stacks_empty() const PRODUCT_RETURN;
   void verify_overflow_empty() const PRODUCT_RETURN;
 
-  // convenience methods in support of debugging
+  // Convenience methods in support of debugging
   static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
   HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
 
-  // accessors
+  // Accessors
   CMSMarkStack* verification_mark_stack() { return &_markStack; }
   CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }
 
@@ -1109,7 +1109,7 @@
 
   CollectionTypes _debug_collection_type;
 
-  // True if a compactiing collection was done.
+  // True if a compacting collection was done.
   bool _did_compact;
   bool did_compact() { return _did_compact; }
 
@@ -1203,7 +1203,7 @@
 
   // Support for compaction
   CompactibleSpace* first_compaction_space() const;
-  // Adjust quantites in the generation affected by
+  // Adjust quantities in the generation affected by
   // the compaction.
   void reset_after_compaction();
 
@@ -1301,7 +1301,7 @@
   void setNearLargestChunk();
   bool isNearLargestChunk(HeapWord* addr);
 
-  // Get the chunk at the end of the space.  Delagates to
+  // Get the chunk at the end of the space.  Delegates to
   // the space.
   FreeChunk* find_chunk_at_end();
 
@@ -1422,7 +1422,6 @@
 // marking from the roots following the first checkpoint.
 // XXX This should really be a subclass of The serial version
 // above, but i have not had the time to refactor things cleanly.
-// That willbe done for Dolphin.
 class Par_MarkFromRootsClosure: public BitMapClosure {
   CMSCollector*  _collector;
   MemRegion      _whole_span;
@@ -1780,7 +1779,7 @@
   void do_already_free_chunk(FreeChunk *fc);
   // Work method called when processing an already free or a
   // freshly garbage chunk to do a lookahead and possibly a
-  // premptive flush if crossing over _limit.
+  // preemptive flush if crossing over _limit.
   void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
   // Process a garbage chunk during sweeping.
   size_t do_garbage_chunk(FreeChunk *fc);
@@ -1879,7 +1878,7 @@
 };
 
 // Allow yielding or short-circuiting of reference list
-// prelceaning work.
+// precleaning work.
 class CMSPrecleanRefsYieldClosure: public YieldClosure {
   CMSCollector* _collector;
   void do_yield_work();
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -197,13 +197,13 @@
 }
 
 
-// Return the HeapWord address corrsponding to the next "0" bit
+// Return the HeapWord address corresponding to the next "0" bit
 // (inclusive).
 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
   return getNextUnmarkedWordAddress(addr, endWord());
 }
 
-// Return the HeapWord address corrsponding to the next "0" bit
+// Return the HeapWord address corresponding to the next "0" bit
 // (inclusive).
 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
   HeapWord* start_addr, HeapWord* end_addr) const {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -164,7 +164,7 @@
   //  _pending_yields that holds the sum (of both sync and async requests), and
   //  a second counter _pending_decrements that only holds the async requests,
   //  for greater efficiency, since in a typical CMS run, there are many more
-  //  pontential (i.e. static) yield points than there are actual
+  //  potential (i.e. static) yield points than there are actual
   //  (i.e. dynamic) yields because of requests, which are few and far between.
   //
   // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -279,7 +279,7 @@
 // When _spoolTail is NULL, then the set of slots with displaced headers
 // is all those starting at the slot <_spoolHead, _firstIndex> and
 // going up to the last slot of last block in the linked list.
-// In this lartter case, _splice_point points to the tail block of
+// In this latter case, _splice_point points to the tail block of
 // this linked list of blocks holding displaced headers.
 void PromotionInfo::verify() const {
   // Verify the following:
--- a/hotspot/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -39,7 +39,7 @@
 // up, the wrapped closure is applied to all elements, keeping track of
 // this elapsed time of this process, and leaving the array empty.
 // The caller must be sure to call "done" to process any unprocessed
-// buffered entriess.
+// buffered entries.
 
 class Generation;
 class HeapRegion;
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -33,7 +33,7 @@
   _threads(NULL), _n_threads(0),
   _hot_card_cache(g1h)
 {
-  // Ergomonically select initial concurrent refinement parameters
+  // Ergonomically select initial concurrent refinement parameters
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
     FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -44,8 +44,8 @@
   _vtime_accum(0.0)
 {
 
-  // Each thread has its own monitor. The i-th thread is responsible for signalling
-  // to thread i+1 if the number of buffers in the queue exceeds a threashold for this
+  // Each thread has its own monitor. The i-th thread is responsible for signaling
+  // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
   // thread. Monitors are also used to wake up the threads during termination.
   // The 0th worker in notified by mutator threads and has a special monitor.
   // The last worker is used for young gen rset size sampling.
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -909,7 +909,7 @@
   }
 #endif
 
-  // Initialise marking structures. This has to be done in a STW phase.
+  // Initialize marking structures. This has to be done in a STW phase.
   reset();
 
   // For each region note start of marking.
@@ -923,8 +923,8 @@
 
   // If we force an overflow during remark, the remark operation will
   // actually abort and we'll restart concurrent marking. If we always
-  // force an oveflow during remark we'll never actually complete the
-  // marking phase. So, we initilize this here, at the start of the
+  // force an overflow during remark we'll never actually complete the
+  // marking phase. So, we initialize this here, at the start of the
   // cycle, so that at the remaining overflow number will decrease at
   // every remark and we'll eventually not need to cause one.
   force_overflow_stw()->init();
@@ -959,7 +959,7 @@
  *
  * Note, however, that this code is also used during remark and in
  * this case we should not attempt to leave / enter the STS, otherwise
- * we'll either hit an asseert (debug / fastdebug) or deadlock
+ * we'll either hit an assert (debug / fastdebug) or deadlock
  * (product). So we should only leave / enter the STS if we are
  * operating concurrently.
  *
@@ -1001,7 +1001,7 @@
       // task 0 is responsible for clearing the global data structures
       // We should be here because of an overflow. During STW we should
       // not clear the overflow flag since we rely on it being true when
-      // we exit this method to abort the pause and restart concurent
+      // we exit this method to abort the pause and restart concurrent
       // marking.
       reset_marking_state(true /* clear_overflow */);
       force_overflow()->update();
@@ -1251,7 +1251,7 @@
   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (use_parallel_marking_threads()) {
     _parallel_workers->set_active_workers((int)active_workers);
-    // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
+    // Don't set _n_par_threads because it affects MT in process_strong_roots()
     // and the decisions on that MT processing is made elsewhere.
     assert(_parallel_workers->active_workers() > 0, "Should have been set");
     _parallel_workers->run_task(&markingTask);
@@ -1484,7 +1484,7 @@
     }
 
     // Set the marked bytes for the current region so that
-    // it can be queried by a calling verificiation routine
+    // it can be queried by a calling verification routine
     _region_marked_bytes = marked_bytes;
 
     return false;
@@ -2306,7 +2306,7 @@
       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
       //
       // CMTask::do_marking_step() is called in a loop, which we'll exit
-      // if there's nothing more to do (i.e. we'completely drained the
+      // if there's nothing more to do (i.e. we've completely drained the
       // entries that were pushed as a a result of applying the 'keep alive'
       // closure to the entries on the discovered ref lists) or we overflow
       // the global marking stack.
@@ -2469,7 +2469,7 @@
     // reference processing is not multi-threaded and is thus
     // performed by the current thread instead of a gang worker).
     //
-    // The gang tasks involved in parallel reference procssing create
+    // The gang tasks involved in parallel reference processing create
     // their own instances of these closures, which do their own
     // synchronization among themselves.
     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
@@ -2546,7 +2546,7 @@
 public:
   void work(uint worker_id) {
     // Since all available tasks are actually started, we should
-    // only proceed if we're supposed to be actived.
+    // only proceed if we're supposed to be active.
     if (worker_id < _cm->active_tasks()) {
       CMTask* task = _cm->task(worker_id);
       task->record_start_time();
@@ -3066,7 +3066,7 @@
 
     // 'start' should be in the heap.
     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
-    // 'end' *may* be just beyone the end of the heap (if hr is the last region)
+    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
 
     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
@@ -4414,7 +4414,7 @@
       // overflow was raised. This means we have to restart the
       // marking phase and start iterating over regions. However, in
       // order to do this we have to make sure that all tasks stop
-      // what they are doing and re-initialise in a safe manner. We
+      // what they are doing and re-initialize in a safe manner. We
       // will achieve this with the use of two barrier sync points.
 
       if (_cm->verbose_low()) {
@@ -4428,7 +4428,7 @@
 
         // When we exit this sync barrier we know that all tasks have
         // stopped doing marking work. So, it's now safe to
-        // re-initialise our data structures. At the end of this method,
+        // re-initialize our data structures. At the end of this method,
         // task 0 will clear the global data structures.
       }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -378,19 +378,19 @@
   friend class G1CMDrainMarkingStackClosure;
 
 protected:
-  ConcurrentMarkThread* _cmThread;   // the thread doing the work
-  G1CollectedHeap*      _g1h;        // the heap.
-  uint                  _parallel_marking_threads; // the number of marking
-                                                   // threads we're use
-  uint                  _max_parallel_marking_threads; // max number of marking
-                                                   // threads we'll ever use
-  double                _sleep_factor; // how much we have to sleep, with
+  ConcurrentMarkThread* _cmThread;   // The thread doing the work
+  G1CollectedHeap*      _g1h;        // The heap
+  uint                  _parallel_marking_threads; // The number of marking
+                                                   // threads we're using
+  uint                  _max_parallel_marking_threads; // Max number of marking
+                                                       // threads we'll ever use
+  double                _sleep_factor; // How much we have to sleep, with
                                        // respect to the work we just did, to
                                        // meet the marking overhead goal
-  double                _marking_task_overhead; // marking target overhead for
+  double                _marking_task_overhead; // Marking target overhead for
                                                 // a single task
 
-  // same as the two above, but for the cleanup task
+  // Same as the two above, but for the cleanup task
   double                _cleanup_sleep_factor;
   double                _cleanup_task_overhead;
 
@@ -399,8 +399,8 @@
   // Concurrent marking support structures
   CMBitMap                _markBitMap1;
   CMBitMap                _markBitMap2;
-  CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
-  CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap
+  CMBitMapRO*             _prevMarkBitMap; // Completed mark bitmap
+  CMBitMap*               _nextMarkBitMap; // Under-construction mark bitmap
 
   BitMap                  _region_bm;
   BitMap                  _card_bm;
@@ -409,43 +409,43 @@
   HeapWord*               _heap_start;
   HeapWord*               _heap_end;
 
-  // Root region tracking and claiming.
+  // Root region tracking and claiming
   CMRootRegions           _root_regions;
 
   // For gray objects
-  CMMarkStack             _markStack; // Grey objects behind global finger.
-  HeapWord* volatile      _finger;  // the global finger, region aligned,
+  CMMarkStack             _markStack; // Grey objects behind global finger
+  HeapWord* volatile      _finger;  // The global finger, region aligned,
                                     // always points to the end of the
                                     // last claimed region
 
-  // marking tasks
-  uint                    _max_worker_id;// maximum worker id
-  uint                    _active_tasks; // task num currently active
-  CMTask**                _tasks;        // task queue array (max_worker_id len)
-  CMTaskQueueSet*         _task_queues;  // task queue set
-  ParallelTaskTerminator  _terminator;   // for termination
+  // Marking tasks
+  uint                    _max_worker_id;// Maximum worker id
+  uint                    _active_tasks; // Task num currently active
+  CMTask**                _tasks;        // Task queue array (max_worker_id len)
+  CMTaskQueueSet*         _task_queues;  // Task queue set
+  ParallelTaskTerminator  _terminator;   // For termination
 
-  // Two sync barriers that are used to synchronise tasks when an
+  // Two sync barriers that are used to synchronize tasks when an
   // overflow occurs. The algorithm is the following. All tasks enter
   // the first one to ensure that they have all stopped manipulating
-  // the global data structures. After they exit it, they re-initialise
-  // their data structures and task 0 re-initialises the global data
+  // the global data structures. After they exit it, they re-initialize
+  // their data structures and task 0 re-initializes the global data
   // structures. Then, they enter the second sync barrier. This
   // ensures that no task starts doing work before all data
-  // structures (local and global) have been re-initialised. When they
+  // structures (local and global) have been re-initialized. When they
   // exit it, they are free to start working again.
   WorkGangBarrierSync     _first_overflow_barrier_sync;
   WorkGangBarrierSync     _second_overflow_barrier_sync;
 
-  // this is set by any task, when an overflow on the global data
-  // structures is detected.
+  // This is set by any task, when an overflow on the global data
+  // structures is detected
   volatile bool           _has_overflown;
-  // true: marking is concurrent, false: we're in remark
+  // True: marking is concurrent, false: we're in remark
   volatile bool           _concurrent;
-  // set at the end of a Full GC so that marking aborts
+  // Set at the end of a Full GC so that marking aborts
   volatile bool           _has_aborted;
 
-  // used when remark aborts due to an overflow to indicate that
+  // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
   volatile bool           _restart_for_overflow;
 
@@ -455,10 +455,10 @@
   // time of remark.
   volatile bool           _concurrent_marking_in_progress;
 
-  // verbose level
+  // Verbose level
   CMVerboseLevel          _verbose_level;
 
-  // All of these times are in ms.
+  // All of these times are in ms
   NumberSeq _init_times;
   NumberSeq _remark_times;
   NumberSeq   _remark_mark_times;
@@ -467,7 +467,7 @@
   double    _total_counting_time;
   double    _total_rs_scrub_time;
 
-  double*   _accum_task_vtime;   // accumulated task vtime
+  double*   _accum_task_vtime;   // Accumulated task vtime
 
   FlexibleWorkGang* _parallel_workers;
 
@@ -487,7 +487,7 @@
   void reset_marking_state(bool clear_overflow = true);
 
   // We do this after we're done with marking so that the marking data
-  // structures are initialised to a sensible and predictable state.
+  // structures are initialized to a sensible and predictable state.
   void set_non_marking_state();
 
   // Called to indicate how many threads are currently active.
@@ -497,14 +497,14 @@
   // mark or remark) and how many threads are currently active.
   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 
-  // prints all gathered CM-related statistics
+  // Prints all gathered CM-related statistics
   void print_stats();
 
   bool cleanup_list_is_empty() {
     return _cleanup_list.is_empty();
   }
 
-  // accessor methods
+  // Accessor methods
   uint parallel_marking_threads() const     { return _parallel_marking_threads; }
   uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
   double sleep_factor()                     { return _sleep_factor; }
@@ -542,7 +542,7 @@
   // frequently.
   HeapRegion* claim_region(uint worker_id);
 
-  // It determines whether we've run out of regions to scan.
+  // It determines whether we've run out of regions to scan
   bool        out_of_regions() { return _finger == _heap_end; }
 
   // Returns the task with the given id
@@ -816,7 +816,7 @@
   inline bool do_yield_check(uint worker_i = 0);
   inline bool should_yield();
 
-  // Called to abort the marking cycle after a Full GC takes palce.
+  // Called to abort the marking cycle after a Full GC takes place.
   void abort();
 
   bool has_aborted()      { return _has_aborted; }
@@ -933,11 +933,11 @@
 
   // Similar to the above routine but there are times when we cannot
   // safely calculate the size of obj due to races and we, therefore,
-  // pass the size in as a parameter. It is the caller's reponsibility
+  // pass the size in as a parameter. It is the caller's responsibility
   // to ensure that the size passed in for obj is valid.
   inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
 
-  // Unconditionally mark the given object, and unconditinally count
+  // Unconditionally mark the given object, and unconditionally count
   // the object in the counting structures for worker id 0.
   // Should *not* be called from parallel code.
   inline bool mark_and_count(oop obj, HeapRegion* hr);
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -105,7 +105,7 @@
   // will then correspond to a (non-existent) card that is also
   // just beyond the heap.
   if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
-    // end of region is not card aligned - incremement to cover
+    // end of region is not card aligned - increment to cover
     // all the cards spanned by the region.
     end_idx += 1;
   }
@@ -222,7 +222,7 @@
   return false;
 }
 
-// Unconditionally mark the given object, and unconditinally count
+// Unconditionally mark the given object, and unconditionally count
 // the object in the counting structures for worker id 0.
 // Should *not* be called from parallel code.
 inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -70,7 +70,7 @@
 
 inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
                                                           bool bot_updates) {
-  // First we have to tedo the allocation, assuming we're holding the
+  // First we have to redo the allocation, assuming we're holding the
   // appropriate lock, in case another thread changed the region while
   // we were waiting to get the lock.
   HeapWord* result = attempt_allocation(word_size, bot_updates);
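
The comment describes the usual optimistic-then-locked retry idiom: try the allocation again under the lock first, because another thread may have installed a fresh region while we waited. A minimal generic sketch (invented types, none of the G1 BOT/bot_updates machinery):

    // Generic sketch of an attempt_allocation_locked-style retry (illustrative only).
    #include <cstddef>
    #include <mutex>
    #include <optional>

    // Toy bump-pointer region: allocation returns the starting offset in words.
    struct Region {
      std::size_t top = 0;
      std::size_t end = 1024;
      std::optional<std::size_t> try_allocate(std::size_t words) {
        if (end - top < words) return std::nullopt;   // region is full
        std::size_t result = top;
        top += words;
        return result;
      }
    };

    std::mutex region_lock;                           // protects current_region
    Region     regions[2];
    Region*    current_region = &regions[0];

    std::optional<std::size_t> attempt_allocation_locked(std::size_t words) {
      std::lock_guard<std::mutex> l(region_lock);
      // First redo the allocation: another thread may have swapped in a fresh
      // region while we were waiting for the lock, so this can now succeed.
      if (auto r = current_region->try_allocate(words)) return r;
      // Otherwise retire the full region, install a new one, and allocate from it.
      current_region = &regions[1];
      return current_region->try_allocate(words);
    }

    int main() {
      auto a = attempt_allocation_locked(16);         // succeeds in regions[0]
      return a.has_value() ? 0 : 1;
    }
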
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -448,7 +448,7 @@
 
   // Otherwise, find the block start using the table, but taking
   // care (cf block_start_unsafe() above) not to parse any objects/blocks
-  // on the cards themsleves.
+  // on the cards themselves.
   size_t index = _array->index_for(addr);
   assert(_array->address_for_index(index) == addr,
          "arg should be start of card");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -169,7 +169,7 @@
 
     // We use the last address in hr as hr could be the
     // last region in the heap. In which case trying to find
-    // the card for hr->end() will be an OOB accesss to the
+    // the card for hr->end() will be an OOB access to the
     // card table.
     HeapWord* last = hr->end() - 1;
     assert(_g1h->g1_committed().contains(last),
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -1373,7 +1373,7 @@
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
   // overpartition factor, currently 4).  Assumes that this will be called
-  // in parallel by ParallelGCThreads worker threads with discinct worker
+  // in parallel by ParallelGCThreads worker threads with distinct worker
   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
   // calls will use the same "claim_value", and that that claim value is
   // different from the claim_value of any heap region before the start of
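
A quick numeric illustration of that chunking, using the overpartition factor of 4 from the comment and invented region/worker counts:

    // Illustrative chunk-size computation for parallel region iteration.
    #include <algorithm>
    #include <cstdio>

    int main() {
      const unsigned n_regions     = 2048;   // invented heap size in regions
      const unsigned n_workers     = 8;      // invented ParallelGCThreads
      const unsigned overpartition = 4;      // from the comment above

      // Each worker gets several smaller chunks instead of one big one, which
      // smooths out load imbalance between workers.
      unsigned chunk_size = std::max(1u, n_regions / (n_workers * overpartition));
      unsigned n_chunks   = (n_regions + chunk_size - 1) / chunk_size;

      std::printf("chunk_size=%u regions, %u chunks for %u workers\n",
                  chunk_size, n_chunks, n_workers);   // 64 regions/chunk, 32 chunks
    }
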
@@ -1518,7 +1518,7 @@
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
-    // are capped at the humongous thresold and we want to
+    // are capped at the humongous threshold and we want to
     // ensure that we don't try to allocate a TLAB as
     // humongous and that we don't allocate a humongous
     // object in a TLAB.
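
To see why the strict inequality matters, here is a tiny sketch; the half-region threshold is the usual G1 convention, and the region size is chosen only for the example.

    // Illustrative humongous-object test (region size chosen for the example).
    #include <cassert>
    #include <cstddef>

    constexpr std::size_t region_words    = 1024 * 1024 / sizeof(void*); // 1 MB region
    constexpr std::size_t humongous_words = region_words / 2;            // threshold

    constexpr bool is_humongous(std::size_t word_size) {
      // Strictly greater-than: a TLAB capped at exactly the threshold must
      // not itself be classified as humongous.
      return word_size > humongous_words;
    }

    int main() {
      assert(!is_humongous(humongous_words));      // a max-sized TLAB is fine
      assert(is_humongous(humongous_words + 1));   // one word more is humongous
      return 0;
    }
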
@@ -1648,24 +1648,24 @@
 
   // Optimized nmethod scanning support routines
 
-  // Register the given nmethod with the G1 heap
+  // Register the given nmethod with the G1 heap.
   virtual void register_nmethod(nmethod* nm);
 
-  // Unregister the given nmethod from the G1 heap
+  // Unregister the given nmethod from the G1 heap.
   virtual void unregister_nmethod(nmethod* nm);
 
   // Migrate the nmethods in the code root lists of the regions
   // in the collection set to regions in to-space. In the event
   // of an evacuation failure, nmethods that reference objects
-  // that were not successfullly evacuated are not migrated.
+  // that were not successfully evacuated are not migrated.
   void migrate_strong_code_roots();
 
   // During an initial mark pause, mark all the code roots that
   // point into regions *not* in the collection set.
   void mark_strong_code_roots(uint worker_id);
 
-  // Rebuild the stong code root lists for each region
-  // after a full GC
+  // Rebuild the strong code root lists for each region
+  // after a full GC.
   void rebuild_strong_code_roots();
 
   // Delete entries for dead interned string and clean up unreferenced symbols
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -1075,7 +1075,7 @@
   }
 
   _short_lived_surv_rate_group->start_adding_regions();
-  // do that for any other surv rate groupsx
+  // Do that for any other surv rate groups
 
   if (update_stats) {
     double cost_per_card_ms = 0.0;
@@ -1741,7 +1741,7 @@
   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
   _inc_cset_bytes_used_before += used_bytes;
 
-  // Cache the values we have added to the aggregated informtion
+  // Cache the values we have added to the aggregated information
   // in the heap region in case we have to remove this region from
   // the incremental collection set, or it is updated by the
   // rset sampling code
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -116,7 +116,7 @@
 // If only -XX:NewRatio is set we should use the specified ratio of the heap
 // as both min and max. This will be interpreted as "fixed" just like the
 // NewSize==MaxNewSize case above. But we will update the min and max
-// everytime the heap size changes.
+// every time the heap size changes.
 //
 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
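
As a worked example of the NewRatio-only case (heap sizes invented): NewRatio is the old:young ratio, so the young generation is sized at heap / (NewRatio + 1), and that figure is recomputed whenever the heap grows or shrinks.

    // Illustrative NewRatio sizing (heap sizes invented).
    #include <cstdio>

    // NewRatio is old:young, so the young gen gets 1/(NewRatio + 1) of the heap.
    static unsigned long long young_size_for(unsigned long long heap_bytes,
                                             unsigned new_ratio) {
      return heap_bytes / (new_ratio + 1);
    }

    int main() {
      const unsigned new_ratio = 2;                 // e.g. -XX:NewRatio=2
      unsigned long long heap = 3ULL << 30;         // heap currently 3 GB
      std::printf("young = %llu MB\n", young_size_for(heap, new_ratio) >> 20);  // 1024 MB
      heap = 6ULL << 30;                            // the heap grew to 6 GB
      std::printf("young = %llu MB\n", young_size_for(heap, new_ratio) >> 20);  // 2048 MB
    }
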
@@ -523,9 +523,9 @@
   // synchronize updates to this field.
   size_t _inc_cset_recorded_rs_lengths;
 
-  // A concurrent refinement thread periodcially samples the young
+  // A concurrent refinement thread periodically samples the young
   // region RSets and needs to update _inc_cset_recorded_rs_lengths as
-  // the RSets grow. Instead of having to syncronize updates to that
+  // the RSets grow. Instead of having to synchronize updates to that
   // field we accumulate them in this field and add it to
   // _inc_cset_recorded_rs_lengths_diffs at the start of a GC.
   ssize_t _inc_cset_recorded_rs_lengths_diffs;
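
The accumulate-then-fold scheme those fields implement, reduced to its essentials (field names invented, the real code tracks more state): the sampling thread only ever adds into a diff accumulator, and the diff is folded into the main counter at the start of a GC when nothing else is running.

    // Illustrative "accumulate diffs, fold at GC start" pattern (names invented).
    #include <atomic>
    #include <cstdio>

    struct IncCSetStats {
      long              recorded_rs_lengths = 100;   // only touched at safepoints
      std::atomic<long> rs_lengths_diffs{0};         // touched concurrently by the sampler

      // Called by the concurrent sampling thread: no lock needed, it only ever
      // adds into the diff accumulator.
      void sample_young_region(long old_len, long new_len) {
        rs_lengths_diffs.fetch_add(new_len - old_len, std::memory_order_relaxed);
      }

      // Called at the start of a GC (single-threaded moment): fold the diffs in.
      void start_gc() {
        recorded_rs_lengths += rs_lengths_diffs.exchange(0, std::memory_order_relaxed);
      }
    };

    int main() {
      IncCSetStats s;
      s.sample_young_region(10, 14);   // RSet grew by 4
      s.sample_young_region(20, 23);   // RSet grew by 3
      s.start_gc();
      std::printf("recorded rs lengths = %ld\n", s.recorded_rs_lengths);  // 107
    }
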
@@ -604,7 +604,7 @@
   // Calculate and return the maximum young list target length that
   // can fit into the pause time goal. The parameters are: rs_lengths
   // represent the prediction of how large the young RSet lengths will
-  // be, base_min_length is the alreay existing number of regions in
+  // be, base_min_length is the already existing number of regions in
   // the young list, min_length and max_length are the desired min and
   // max young list length according to the user's inputs.
   uint calculate_young_list_target_length(size_t rs_lengths,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -103,7 +103,7 @@
   // The data structure implemented is a circular queue.
   // Head "points" to the most recent addition, tail to the oldest one.
   // The array is of fixed size and I don't think we'll need more than
-  // two or three entries with the current behaviour of G1 pauses.
+  // two or three entries with the current behavior of G1 pauses.
   // If the array is full, an easy fix is to look for the pauses with
   // the shortest gap between them and consolidate them.
   // For now, we have taken the expedient alternative of forgetting
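
For readers unfamiliar with MMU bookkeeping, a compact model of the circular pause queue and the utilization query it supports; the fixed capacity, head/tail roles and the "forget the oldest" eviction follow the comment, everything else is simplified and invented.

    // Simplified MMU tracker: a fixed-size circular queue of GC pauses.
    #include <cstdio>

    struct Pause { double start_s; double end_s; };

    class MMUTrackerSketch {
      static constexpr int kCap = 8;      // small fixed capacity, as in the comment
      Pause _q[kCap];
      int   _head = -1;                   // most recent addition
      int   _tail = 0;                    // oldest entry
      int   _size = 0;

    public:
      void add_pause(double start_s, double end_s) {
        if (_size == kCap) {              // full: forget the oldest pause
          _tail = (_tail + 1) % kCap;
          _size--;
        }
        _head = (_head + 1) % kCap;
        _q[_head] = {start_s, end_s};
        _size++;
      }

      // Fraction of the slice [now - slice_s, now] NOT spent in recorded pauses.
      double mutator_utilization(double now_s, double slice_s) const {
        double window_start = now_s - slice_s;
        double paused = 0.0;
        for (int i = 0, idx = _tail; i < _size; i++, idx = (idx + 1) % kCap) {
          double s = _q[idx].start_s > window_start ? _q[idx].start_s : window_start;
          double e = _q[idx].end_s   < now_s        ? _q[idx].end_s   : now_s;
          if (e > s) paused += e - s;
        }
        return (slice_s - paused) / slice_s;
      }
    };

    int main() {
      MMUTrackerSketch t;
      t.add_pause(1.00, 1.05);            // a 50 ms pause
      t.add_pause(1.40, 1.42);            // a 20 ms pause
      std::printf("MMU over the last second: %.2f\n",
                  t.mutator_utilization(1.5, 1.0));   // (1.0 - 0.07) / 1.0 = 0.93
    }
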
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -176,7 +176,7 @@
     // any hash values from the mark word. These hash values are
     // used when verifying the dictionaries and so removing them
     // from the mark word can make verification of the dictionaries
-    // fail. At the end of the GC, the orginal mark word values
+    // fail. At the end of the GC, the original mark word values
     // (including hash values) are restored to the appropriate
     // objects.
     if (!VerifySilently) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -112,7 +112,7 @@
   // take_sample() only returns "used".  When sampling was used, there
   // were some anomalous values emitted which may have been the consequence
   // of not updating all values simultaneously (i.e., see the calculation done
-  // in eden_space_used(), is it possbile that the values used to
+  // in eden_space_used(), is it possible that the values used to
   // calculate either eden_used or survivor_used are being updated by
   // the collector when the sample is being done?).
   const bool sampled = false;
@@ -135,7 +135,7 @@
 
   //   Young collection set
   //  name "generation.0".  This is logically the young generation.
-  //  The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces.
+  //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
   // See  _old_collection_counters for additional counters
   _young_collection_counters = new G1YoungGenerationCounters(this, "young");
 
@@ -254,7 +254,7 @@
     eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
     eden_counters()->update_used(eden_space_used());
     // only the to survivor space (s1) is active, so we don't need to
-    // update the counteres for the from survivor space (s0)
+    // update the counters for the from survivor space (s0)
     to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
     to_counters()->update_used(survivor_space_used());
     old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -108,7 +108,7 @@
 // is that all the above sizes need to be recalculated when the old
 // gen changes capacity (after a GC or after a humongous allocation)
 // but only the eden occupancy changes when a new eden region is
-// allocated. So, in the latter case we have minimal recalcuation to
+// allocated. So, in the latter case we have minimal recalculation to
 // do which is important as we want to keep the eden region allocation
 // path as low-overhead as possible.
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -177,7 +177,7 @@
     // The _record_refs_into_cset flag is true during the RSet
     // updating part of an evacuation pause. It is false at all
     // other times:
-    //  * rebuilding the rembered sets after a full GC
+    //  * rebuilding the remembered sets after a full GC
     //  * during concurrent refinement.
     //  * updating the remembered sets of regions in the collection
     //    set in the event of an evacuation failure (when deferred
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -195,7 +195,7 @@
     HeapRegionRemSetIterator iter(hrrs);
     size_t card_index;
 
-    // We claim cards in block so as to recude the contention. The block size is determined by
+    // We claim cards in blocks so as to reduce contention. The block size is determined by
     // the G1RSetScanBlockSize parameter.
     size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
     for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
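
The block claiming mentioned above amounts to advancing a shared cursor by a whole block per atomic operation instead of once per card. A generic sketch (plain std::atomic, not the HeapRegionRemSetIterator API; all sizes invented):

    // Illustrative block-wise claiming of cards from a shared iteration cursor.
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    constexpr std::size_t kTotalCards = 1000;
    constexpr std::size_t kBlockSize  = 64;        // stand-in for G1RSetScanBlockSize

    std::atomic<std::size_t> claim_cursor{0};      // next unclaimed card index
    std::atomic<std::size_t> scanned{0};

    void worker() {
      for (;;) {
        // One atomic add claims a whole block of cards, reducing contention.
        std::size_t first = claim_cursor.fetch_add(kBlockSize);
        if (first >= kTotalCards) break;
        std::size_t last = first + kBlockSize < kTotalCards ? first + kBlockSize
                                                            : kTotalCards;
        for (std::size_t card = first; card < last; card++) {
          scanned.fetch_add(1, std::memory_order_relaxed);   // "scan" the card
        }
      }
    }

    int main() {
      std::vector<std::thread> ts;
      for (int i = 0; i < 4; i++) ts.emplace_back(worker);
      for (auto& t : ts) t.join();
      std::printf("scanned %zu of %zu cards exactly once\n",
                  scanned.load(), kTotalCards);
    }
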
@@ -587,7 +587,7 @@
 
   // While we are processing RSet buffers during the collection, we
   // actually don't want to scan any cards on the collection set,
-  // since we don't want to update remebered sets with entries that
+  // since we don't want to update remembered sets with entries that
   // point into the collection set, given that live objects from the
   // collection set are about to move and such entries will be stale
   // very soon. This change also deals with a reliability issue which
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -1027,7 +1027,7 @@
       }
     }
 
-    // Loook up end - 1
+    // Look up end - 1
     HeapWord* addr_4 = the_end - 1;
     HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
     if (b_start_4 != p) {
@@ -1111,7 +1111,7 @@
     // will be false, and it will pick up top() as the high water mark
     // of region. If it does so after _gc_time_stamp = ..., then it
     // will pick up the right saved_mark_word() as the high water mark
-    // of the region. Either way, the behaviour will be correct.
+    // of the region. Either way, the behavior will be correct.
     ContiguousSpace::set_saved_mark();
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -97,7 +97,7 @@
   HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
  public:
-  // Empty contructor, we'll initialize it with the initialize() method.
+  // Empty constructor, we'll initialize it with the initialize() method.
   HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
 
   void initialize(HeapWord* bottom, HeapWord* end);
--- a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -71,7 +71,7 @@
   assert(_lock->owned_by_self(), "Required.");
 
   // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
-  // we acquire DirtyCardQ_CBL_mon inside enqeue_complete_buffer as they
+  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
   // have the same rank and we may get the "possible deadlock" message
   _lock->unlock();
 
@@ -151,7 +151,7 @@
 
       // The current PtrQ may be the shared dirty card queue and
       // may be being manipulated by more than one worker thread
-      // during a pause. Since the enqueuing of the completed
+      // during a pause. Since the enqueueing of the completed
       // buffer unlocks the Shared_DirtyCardQ_lock more than one
       // worker thread can 'race' on reading the shared queue attributes
       // (_buf and _index) and multiple threads can call into this
@@ -170,7 +170,7 @@
 
       locking_enqueue_completed_buffer(buf);  // enqueue completed buffer
 
-      // While the current thread was enqueuing the buffer another thread
+      // While the current thread was enqueueing the buffer another thread
       // may have allocated a new buffer and inserted it into this pointer
       // queue. If that happens then we just return so that the current
       // thread doesn't overwrite the buffer allocated by the other thread
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -144,7 +144,7 @@
 
   // Attempts to ensure that the given card_index in the given region is in
   // the sparse table.  If successful (because the card was already
-  // present, or because it was successfullly added) returns "true".
+  // present, or because it was successfully added) returns "true".
   // Otherwise, returns "false" to indicate that the addition would
   // overflow the entry for the region.  The caller must transfer these
   // entries to a larger-capacity representation.
@@ -201,8 +201,7 @@
   bool has_next(size_t& card_index);
 };
 
-// Concurrent accesss to a SparsePRT must be serialized by some external
-// mutex.
+// Concurrent access to a SparsePRT must be serialized by some external mutex.
 
 class SparsePRTIter;
 class SparsePRTCleanupTask;
@@ -248,7 +247,7 @@
 
   // Attempts to ensure that the given card_index in the given region is in
   // the sparse table.  If successful (because the card was already
-  // present, or because it was successfullly added) returns "true".
+  // present, or because it was successfully added) returns "true".
   // Otherwise, returns "false" to indicate that the addition would
   // overflow the entry for the region.  The caller must transfer these
   // entries to a larger-capacity representation.
--- a/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -154,7 +154,7 @@
   // There used to be this guarantee there.
   // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
   // Code below forces this requirement.  In addition the desired eden
-  // size and disired survivor sizes are desired goals and may
+  // size and desired survivor sizes are desired goals and may
   // exceed the total generation size.
 
   assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),
--- a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -213,7 +213,7 @@
       && sp->block_is_obj(first_block)      // first block is an object
       && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
            || oop(first_block)->is_typeArray())) {
-    // Find our least non-clean card, so that a left neighbour
+    // Find our least non-clean card, so that a left neighbor
     // does not scan an object straddling the mutual boundary
     // too far to the right, and attempt to scan a portion of
     // that object twice.
@@ -247,14 +247,14 @@
     } NOISY(else {
       tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL");
       // In the future, we could have this thread look for a non-NULL value to copy from its
-      // right neighbour (up to the end of the first object).
+      // right neighbor (up to the end of the first object).
       if (last_card_of_cur_chunk < last_card_of_first_obj) {
         tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n"
                       "   might be efficient to get value from right neighbour?");
       }
     })
   } else {
-    // In this case we can help our neighbour by just asking them
+    // In this case we can help our neighbor by just asking them
     // to stop at our first card (even though it may not be dirty).
     NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");)
     assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -1070,7 +1070,7 @@
     size_policy->avg_survived()->sample(from()->used());
   }
 
-  // We need to use a monotonically non-deccreasing time in ms
+  // We need to use a monotonically non-decreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@@ -1402,7 +1402,7 @@
 #ifndef PRODUCT
 // It's OK to call this multi-threaded;  the worst thing
 // that can happen is that we'll get a bunch of closely
-// spaced simulated oveflows, but that's OK, in fact
+// spaced simulated overflows, but that's OK, in fact
 // probably good as it would exercise the overflow code
 // under contention.
 bool ParNewGeneration::should_simulate_overflow() {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -118,8 +118,8 @@
 
 
 // Make checks on the current sizes of the generations and
-// the contraints on the sizes of the generations.  Push
-// up the boundary within the contraints.  A partial
+// the constraints on the sizes of the generations.  Push
+// up the boundary within the constraints.  A partial
 // push can occur.
 void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
   assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -69,7 +69,7 @@
   // the available space and attempt to move the boundary if more space
   // is needed.  The growth is not guaranteed to occur.
   void adjust_boundary_for_old_gen_needs(size_t desired_change_in_bytes);
-  // Similary for a growth of the young generation.
+  // Similarly for a growth of the young generation.
   void adjust_boundary_for_young_gen_needs(size_t eden_size, size_t survivor_size);
 
   // Return the total byte size of the reserved space
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -65,7 +65,7 @@
   }
 };
 
-// Checks all objects for the existance of some type of mark,
+// Checks all objects for the existence of some type of mark,
 // precise or imprecise, dirty or newgen.
 class CheckForUnmarkedObjects : public ObjectClosure {
  private:
@@ -84,7 +84,7 @@
   }
 
   // Card marks are not precise. The current system can leave us with
-  // a mismash of precise marks and beginning of object marks. This means
+  // a mismatch of precise marks and beginning of object marks. This means
   // we test for missing precise marks first. If any are found, we don't
   // fail unless the object head is also unmarked.
   virtual void do_object(oop obj) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -202,12 +202,12 @@
     list->print("list:");
   }
   if (list->is_empty()) {
-    // Enqueuing the empty list: nothing to do.
+    // Enqueueing the empty list: nothing to do.
     return;
   }
   uint list_length = list->length();
   if (is_empty()) {
-    // Enqueuing to empty list: just acquire elements.
+    // Enqueueing to empty list: just acquire elements.
     set_insert_end(list->insert_end());
     set_remove_end(list->remove_end());
     set_length(list_length);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -303,7 +303,7 @@
 // load balancing (i.e., over partitioning).  The last task to be
 // executed by a GC thread in a job is a work stealing task.  A
 // GC  thread that gets a work stealing task continues to execute
-// that task until the job is done.  In the static number of GC theads
+// that task until the job is done.  In the static number of GC threads
 // case, tasks are added to a queue (FIFO).  The work stealing tasks are
 // the last to be added.  Once the tasks are added, the GC threads grab
 // a task and go.  A single thread can do all the non-work stealing tasks
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -673,7 +673,7 @@
 
 // Before delegating the resize to the young generation,
 // the reserved space for the young and old generations
-// may be changed to accomodate the desired resize.
+// may be changed to accommodate the desired resize.
 void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
     size_t survivor_size) {
   if (UseAdaptiveGCBoundary) {
@@ -690,7 +690,7 @@
 
 // Before delegating the resize to the old generation,
 // the reserved space for the young and old generations
-// may be changed to accomodate the desired resize.
+// may be changed to accommodate the desired resize.
 void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
   if (UseAdaptiveGCBoundary) {
     if (size_policy()->bytes_absorbed_from_eden() != 0) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -45,7 +45,7 @@
 // the do_it() method of a ThreadRootsMarkingTask is executed, it
 // starts marking from the thread's roots.
 //
-// The enqueuing of the MarkFromRootsTask and ThreadRootsMarkingTask
+// The enqueueing of the MarkFromRootsTask and ThreadRootsMarkingTask
 // do little more than create the task and put it on a queue.  The
 // queue is a GCTaskQueue and threads steal tasks from this GCTaskQueue.
 //
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -482,7 +482,7 @@
   //   adjust down the total heap size.  Adjust down the larger of the
   //   generations.
 
-  // Add some checks for a threshhold for a change.  For example,
+  // Add some checks for a threshold for a change.  For example,
   // a change less than the necessary alignment is probably not worth
   // attempting.
 
@@ -1161,7 +1161,7 @@
     // We use the tenuring threshold to equalize the cost of major
     // and minor collections.
     // ThresholdTolerance is used to indicate how sensitive the
-    // tenuring threshold is to differences in cost betweent the
+    // tenuring threshold is to differences in cost between the
     // collection types.
 
     // Get the times of interest. This involves a little work, so
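
In outline, the tolerance test only reacts when one cost clearly dominates the other; the sketch below follows the direction documented for the _increment/_decrement_tenuring_threshold_for_gc_cost flags elsewhere in this changeset, with invented numbers and a simplified clamp.

    // Illustrative tenuring-threshold adjustment driven by relative GC costs
    // (direction per the flags in adaptiveSizePolicy.hpp; details simplified).
    #include <algorithm>
    #include <cstdio>

    unsigned adjust_tenuring_threshold(unsigned threshold,
                                       double minor_gc_cost, double major_gc_cost,
                                       double threshold_tolerance_percent) {
      const double tol = 1.0 + threshold_tolerance_percent / 100.0;
      if (major_gc_cost > minor_gc_cost * tol) {
        // Major collections dominate: keep objects in the young gen longer so
        // less gets promoted into the old gen.
        threshold = std::min(threshold + 1, 15u);
      } else if (minor_gc_cost > major_gc_cost * tol) {
        // Minor collections dominate: promote sooner so young GCs copy less.
        threshold = threshold > 1 ? threshold - 1 : threshold;
      }
      // Costs within the tolerance band leave the threshold alone.
      return threshold;
    }

    int main() {
      unsigned t = 7;
      t = adjust_tenuring_threshold(t, /*minor*/0.02, /*major*/0.06, /*tol%*/10.0);
      std::printf("new tenuring threshold = %u\n", t);   // 8: major cost dominates
    }
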
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -37,7 +37,7 @@
 //
 // It also computes an optimal tenuring threshold between the young
 // and old generations, so as to equalize the cost of collections
-// of those generations, as well as optimial survivor space sizes
+// of those generations, as well as optimal survivor space sizes
 // for the young generation.
 //
 // While this class is specifically intended for a generational system
@@ -113,7 +113,7 @@
   // Changing the generation sizing depends on the data that is
   // gathered about the effects of changes on the pause times and
   // throughput.  These variable count the number of data points
-  // gathered.  The policy may use these counters as a threshhold
+  // gathered.  The policy may use these counters as a threshold
   // for reliable data.
   julong _young_gen_change_for_major_pause_count;
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -195,7 +195,7 @@
 
   // Update all the counters that can be updated from the size policy.
   // This should be called after all policy changes have been made
-  // and reflected internall in the size policy.
+  // and reflected internally in the size policy.
   void update_counters_from_policy();
 
   // Update counters that can be updated from fields internal to the
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -661,7 +661,7 @@
 }
 
 jlong PSMarkSweep::millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong ret_val = now - _time_of_last_gc;
@@ -674,7 +674,7 @@
 }
 
 void PSMarkSweep::reset_millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -280,7 +280,7 @@
         "Should be true before post_resize()");
       MemRegion mangle_region(object_space()->end(), virtual_space_high);
       // Note that the object space has not yet been updated to
-      // coincede with the new underlying virtual space.
+      // coincide with the new underlying virtual space.
       SpaceMangler::mangle_region(mangle_region);
     }
     post_resize();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -187,7 +187,7 @@
 
   void space_invariants() PRODUCT_RETURN;
 
-  // Performace Counter support
+  // Performance Counter support
   void update_counters();
 
   // Printing support
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -2176,7 +2176,7 @@
 
     heap->resize_all_tlabs();
 
-    // Resize the metaspace capactiy after a collection
+    // Resize the metaspace capacity after a collection
     MetaspaceGC::compute_new_size();
 
     if (TraceGen1Time) accumulated_time()->stop();
@@ -3285,7 +3285,7 @@
 }
 
 jlong PSParallelCompact::millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong ret_val = now - _time_of_last_gc;
@@ -3298,7 +3298,7 @@
 }
 
 void PSParallelCompact::reset_millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -877,7 +877,7 @@
 // The summary phase calculates the total live data to the left of each region
 // XXX.  Based on that total and the bottom of the space, it can calculate the
 // starting location of the live data in XXX.  The summary phase calculates for
-// each region XXX quantites such as
+// each region XXX quantities such as
 //
 //      - the amount of live data at the beginning of a region from an object
 //        entering the region.
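
A tiny worked example of that bookkeeping (live amounts invented): the running total of live data to the left of region i is exactly the offset at which region i's surviving data starts after compaction.

    // Illustrative summary-phase arithmetic: destination of each region's live data.
    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t live_words[] = {1024, 300, 0, 700};  // invented live data per region

      std::size_t live_to_left = 0;            // total live data in regions before i
      for (std::size_t i = 0; i < 4; i++) {
        // After compaction, region i's surviving data starts this many words
        // past the bottom of the space.
        std::printf("region %zu: live=%4zu  destination offset=%zu words\n",
                    i, live_words[i], live_to_left);
        live_to_left += live_words[i];
      }
      // e.g. region 3's 700 live words are copied to offset 1324 (= 1024 + 300 + 0).
    }
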
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -78,7 +78,7 @@
   // Returns a subregion containing all objects in this space.
   MemRegion used_region()            { return MemRegion(bottom(), top()); }
 
-  // Boolean querries.
+  // Boolean queries.
   bool is_empty() const              { return used() == 0; }
   bool not_empty() const             { return used() > 0; }
   bool contains(const void* p) const { return _bottom <= p && p < _end; }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -558,7 +558,7 @@
             ((gc_cause != GCCause::_java_lang_system_gc) ||
               UseAdaptiveSizePolicyWithSystemGC)) {
 
-          // Calculate optimial free space amounts
+          // Calculate optimal free space amounts
           assert(young_gen->max_size() >
             young_gen->from_space()->capacity_in_bytes() +
             young_gen->to_space()->capacity_in_bytes(),
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -35,7 +35,7 @@
 class PSVirtualSpace : public CHeapObj<mtGC> {
   friend class VMStructs;
  protected:
-  // The space is committed/uncommited in chunks of size _alignment.  The
+  // The space is committed/uncommitted in chunks of size _alignment.  The
   // ReservedSpace passed to initialize() must be aligned to this value.
   const size_t _alignment;
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -136,7 +136,7 @@
     // generation - the less space committed, the smaller the survivor
     // space, possibly as small as an alignment. However, we are interested
     // in the case where the young generation is 100% committed, as this
-    // is the point where eden reachs its maximum size. At this point,
+    // is the point where eden reaches its maximum size. At this point,
     // the size of a survivor space is max_survivor_size.
     max_eden_size = size - 2 * max_survivor_size;
   }
@@ -288,7 +288,7 @@
   // There used to be this guarantee there.
   // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
   // Code below forces this requirement.  In addition the desired eden
-  // size and disired survivor sizes are desired goals and may
+  // size and desired survivor sizes are desired goals and may
   // exceed the total generation size.
 
   assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -121,7 +121,7 @@
 
   // Choose a number of GC threads based on the current size
   // of the heap.  This may be complicated because the size of
-  // the heap depends on factors such as the thoughput goal.
+  // the heap depends on factors such as the throughput goal.
   // Still a large heap should be collected by more GC threads.
   active_workers_by_heap_size =
       MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
@@ -445,7 +445,7 @@
   // into account (i.e., don't trigger if the amount of free
   // space has suddenly jumped up).  If the current is much
   // higher than the average, use the average since it represents
-  // the longer term behavor.
+  // the longer term behavior.
   const size_t live_in_eden =
     MIN2(eden_live, (size_t) avg_eden_live()->average());
   const size_t free_in_eden = max_eden_size > live_in_eden ?
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -74,7 +74,7 @@
   };
 
   // Goal for the fraction of the total time during which application
-  // threads run.
+  // threads run
   const double _throughput_goal;
 
   // Last calculated sizes, in bytes, and aligned
@@ -83,21 +83,21 @@
 
   size_t _survivor_size;    // calculated survivor size in bytes
 
-  // This is a hint for the heap:  we've detected that gc times
+  // This is a hint for the heap:  we've detected that GC times
   // are taking longer than GCTimeLimit allows.
   bool _gc_overhead_limit_exceeded;
   // Use for diagnostics only.  If UseGCOverheadLimit is false,
   // this variable is still set.
   bool _print_gc_overhead_limit_would_be_exceeded;
   // Count of consecutive GC that have exceeded the
-  // GC time limit criterion.
+  // GC time limit criterion
   uint _gc_overhead_limit_count;
   // This flag signals that GCTimeLimit is being exceeded
-  // but may not have done so for the required number of consequetive
-  // collections.
+  // but may not have done so for the required number of consecutive
+  // collections
 
   // Minor collection timers used to determine both
-  // pause and interval times for collections.
+  // pause and interval times for collections
   static elapsedTimer _minor_timer;
 
   // Major collection timers, used to determine both
@@ -120,7 +120,7 @@
   // Statistics for survivor space calculation for young generation
   AdaptivePaddedAverage*   _avg_survived;
 
-  // Objects that have been directly allocated in the old generation.
+  // Objects that have been directly allocated in the old generation
   AdaptivePaddedNoZeroDevAverage*   _avg_pretenured;
 
   // Variable for estimating the major and minor pause times.
@@ -142,33 +142,33 @@
   // for making ergonomic decisions.
   double _latest_minor_mutator_interval_seconds;
 
-  // Allowed difference between major and minor gc times, used
-  // for computing tenuring_threshold.
+  // Allowed difference between major and minor GC times, used
+  // for computing tenuring_threshold
   const double _threshold_tolerance_percent;
 
-  const double _gc_pause_goal_sec; // goal for maximum gc pause
+  const double _gc_pause_goal_sec; // Goal for maximum GC pause
 
   // Flag indicating that the adaptive policy is ready to use
   bool _young_gen_policy_is_ready;
 
-  // decrease/increase the young generation for minor pause time
+  // Decrease/increase the young generation for minor pause time
   int _change_young_gen_for_min_pauses;
 
-  // decrease/increase the old generation for major pause time
+  // Decrease/increase the old generation for major pause time
   int _change_old_gen_for_maj_pauses;
 
-  //   change old geneneration for throughput
+  //   change old generation for throughput
   int _change_old_gen_for_throughput;
 
   //   change young generation for throughput
   int _change_young_gen_for_throughput;
 
   // Flag indicating that the policy would
-  //   increase the tenuring threshold because of the total major gc cost
-  //   is greater than the total minor gc cost
+  //   increase the tenuring threshold because the total major GC cost
+  //   is greater than the total minor GC cost
   bool _increment_tenuring_threshold_for_gc_cost;
-  //   decrease the tenuring threshold because of the the total minor gc
-  //   cost is greater than the total major gc cost
+  //   decrease the tenuring threshold because the total minor GC
+  //   cost is greater than the total major GC cost
   bool _decrement_tenuring_threshold_for_gc_cost;
   //   decrease due to survivor size limit
   bool _decrement_tenuring_threshold_for_survivor_limit;
@@ -182,7 +182,7 @@
   // Changing the generation sizing depends on the data that is
   // gathered about the effects of changes on the pause times and
   // throughput.  These variable count the number of data points
-  // gathered.  The policy may use these counters as a threshhold
+  // gathered.  The policy may use these counters as a threshold
   // for reliable data.
   julong _young_gen_change_for_minor_throughput;
   julong _old_gen_change_for_major_throughput;
@@ -225,7 +225,7 @@
   // larger than 1.0 if just the sum of the minor cost and
   // the major cost is used.  Worse than that is the
   // fact that the minor cost and the major cost each
-  // tend toward 1.0 in the extreme of high gc costs.
+  // tend toward 1.0 in the extreme of high GC costs.
   // Limit the value of gc_cost to 1.0 so that the mutator
   // cost stays non-negative.
   virtual double gc_cost() const {
@@ -238,23 +238,23 @@
   virtual double time_since_major_gc() const;
 
   // Average interval between major collections to be used
-  // in calculating the decaying major gc cost.  An overestimate
+  // in calculating the decaying major GC cost.  An overestimate
   // of this time would be a conservative estimate because
   // this time is used to decide if the major GC cost
   // should be decayed (i.e., if the time since the last
-  // major gc is long compared to the time returned here,
+  // major GC is long compared to the time returned here,
   // then the major GC cost will be decayed).  See the
   // implementations for the specifics.
   virtual double major_gc_interval_average_for_decay() const {
     return _avg_major_interval->average();
   }
 
-  // Return the cost of the GC where the major gc cost
+  // Return the cost of the GC where the major GC cost
   // has been decayed based on the time since the last
   // major collection.
   double decaying_gc_cost() const;
 
-  // Decay the major gc cost.  Use this only for decisions on
+  // Decay the major GC cost.  Use this only for decisions on
   // whether to adjust, not to determine by how much to adjust.
   // This approximation is crude and may not be good enough for the
   // latter.
--- a/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -49,11 +49,11 @@
   // estimates.
   AdaptivePaddedAverage _demand_rate_estimate;
 
-  ssize_t     _desired;         // Demand stimate computed as described above
+  ssize_t     _desired;          // Demand estimate computed as described above
   ssize_t     _coal_desired;     // desired +/- small-percent for tuning coalescing
 
-  ssize_t     _surplus;         // count - (desired +/- small-percent),
-                                // used to tune splitting in best fit
+  ssize_t     _surplus;          // count - (desired +/- small-percent),
+                                 // used to tune splitting in best fit
   ssize_t     _bfr_surp;         // surplus at start of current sweep
   ssize_t     _prev_sweep;       // count from end of previous sweep
   ssize_t     _before_sweep;     // count from before current sweep
--- a/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -54,7 +54,7 @@
 void ConcurrentGCThread::create_and_start() {
   if (os::create_thread(this, os::cgc_thread)) {
     // XXX: need to set this to low priority
-    // unless "agressive mode" set; priority
+    // unless "aggressive mode" set; priority
     // should be just less than that of VMThread.
     os::set_priority(this, NearMaxPriority);
     if (!_should_terminate && !DisableStartThread) {
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -159,7 +159,7 @@
 // that no calculation of the slope has yet been done.  Returning true
 // for a slope equal to 0 reflects the intuitive expectation of the
 // dependence on the slope.  Don't use the complement of these functions
-// since that untuitive expectation is not built into the complement.
+// since that intuitive expectation is not built into the complement.
 bool LinearLeastSquareFit::decrement_will_decrease() {
   return (_slope >= 0.00);
 }
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -210,7 +210,7 @@
   double y(double x);
   double slope() { return _slope; }
   // Methods to decide if a change in the dependent variable will
-  // achive a desired goal.  Note that these methods are not
+  // achieve a desired goal.  Note that these methods are not
   // complementary and both are needed.
   bool decrement_will_decrease();
   bool increment_will_decrease();
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -72,7 +72,7 @@
 #endif  // NOT_PRODUCT
 
 // There may be unallocated holes in the middle chunks
-// that should be filled with dead objects to ensure parseability.
+// that should be filled with dead objects to ensure parsability.
 void MutableNUMASpace::ensure_parsability() {
   for (int i = 0; i < lgrp_spaces()->length(); i++) {
     LGRPSpace *ls = lgrp_spaces()->at(i);
@@ -880,8 +880,8 @@
 }
 
 void MutableNUMASpace::verify() {
-  // This can be called after setting an arbitary value to the space's top,
-  // so an object can cross the chunk boundary. We ensure the parsablity
+  // This can be called after setting an arbitrary value to the space's top,
+  // so an object can cross the chunk boundary. We ensure the parsability
   // of the space and just walk the objects in linear fashion.
   ensure_parsability();
   MutableSpace::verify();
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -31,7 +31,7 @@
 
 // A MutableSpace is a subtype of ImmutableSpace that supports the
 // concept of allocation. This includes the concepts that a space may
-// be only partially full, and the querry methods that go with such
+// be only partially full, and the query methods that go with such
 // an assumption. MutableSpace is also responsible for minimizing the
 // page allocation time by having the memory pretouched (with
 // AlwaysPreTouch) and for optimizing page placement on NUMA systems
@@ -111,7 +111,7 @@
 
   virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
 
-  // Boolean querries.
+  // Boolean queries.
   bool is_empty() const              { return used_in_words() == 0; }
   bool not_empty() const             { return used_in_words() > 0; }
   bool contains(const void* p) const { return _bottom <= p && p < _end; }
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -152,7 +152,7 @@
 
 // The buffer comes with its own BOT, with a shared (obviously) underlying
 // BlockOffsetSharedArray. We manipulate this BOT in the normal way
-// as we would for any contiguous space. However, on accasion we
+// as we would for any contiguous space. However, on occasion we
 // need to do some buffer surgery at the extremities before we
 // start using the body of the buffer for allocations. Such surgery
 // (as explained elsewhere) is to prevent allocation on a card that
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -92,7 +92,7 @@
   }
 
   // The total (word) size of the buffer, including both allocated and
-  // unallocted space.
+  // unallocated space.
   size_t word_sz() { return _word_sz; }
 
   // Should only be done if we are about to reset with a new buffer of the
--- a/hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -75,7 +75,7 @@
 
   // High water mark for allocations.  Typically, the space above
   // this point has been mangled previously and doesn't need to be
-  // touched again.  Space belows this point has been allocated
+  // touched again.  Space below this point has been allocated
   // and remangling is needed between the current top and this
   // high water mark.
   HeapWord* _top_for_allocations;
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -82,7 +82,7 @@
 
 // Allocations may fail in several threads at about the same time,
 // resulting in multiple gc requests.  We only want to do one of them.
-// In case a GC locker is active and the need for a GC is already signalled,
+// In case a GC locker is active and the need for a GC is already signaled,
 // we want to skip this GC attempt altogether, without doing a futile
 // safepoint operation.
 bool VM_GC_Operation::skip_operation() const {
--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -31,7 +31,7 @@
 // This class exposes implementation details of the various
 // collector(s), and we need to be very careful with it. If
 // use of this class grows, we should split it into public
-// and implemenation-private "causes".
+// and implementation-private "causes".
 //
 
 class GCCause : public AllStatic {
--- a/hotspot/src/share/vm/memory/allocation.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -576,8 +576,8 @@
   bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
   bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
   bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
-  ResourceObj(); // default construtor
-  ResourceObj(const ResourceObj& r); // default copy construtor
+  ResourceObj(); // default constructor
+  ResourceObj(const ResourceObj& r); // default copy constructor
   ResourceObj& operator=(const ResourceObj& r); // default copy assignment
   ~ResourceObj();
 #endif // ASSERT
--- a/hotspot/src/share/vm/memory/barrierSet.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/barrierSet.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -124,7 +124,7 @@
   virtual bool has_read_region_opt() = 0;
   virtual bool has_write_region_opt() = 0;
 
-  // These operations should assert false unless the correponding operation
+  // These operations should assert false unless the corresponding operation
   // above returns true.  Otherwise, they should perform an appropriate
   // barrier for an array whose elements are all in the given memory region.
   virtual void read_ref_array(MemRegion mr) = 0;
@@ -165,7 +165,7 @@
   // normally reserve space for such tables, and commit parts of the table
   // "covering" parts of the heap that are committed.  The constructor is
   // passed the maximum number of independently committable subregions to
-  // be covered, and the "resize_covoered_region" function allows the
+  // be covered, and the "resize_covered_region" function allows the
   // sub-parts of the heap to inform the barrier set of changes of their
   // sizes.
   BarrierSet(int max_covered_regions) :
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -56,7 +56,7 @@
 template <class Chunk_t, template <class> class FreeList_t>
 void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
   TreeChunk<Chunk_t, FreeList_t>* nextTC = (TreeChunk<Chunk_t, FreeList_t>*)next();
-  if (prev() != NULL) { // interior list node shouldn'r have tree fields
+  if (prev() != NULL) { // interior list node shouldn't have tree fields
     guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL &&
               embedded_list()->right()  == NULL, "should be clear");
   }
@@ -247,7 +247,7 @@
     prevFC->link_after(nextTC);
   }
 
-  // Below this point the embeded TreeList<Chunk_t, FreeList_t> being used for the
+  // Below this point the embedded TreeList<Chunk_t, FreeList_t> being used for the
   // tree node may have changed. Don't use "this"
   // TreeList<Chunk_t, FreeList_t>*.
   // chunk should still be a free chunk (bit set in _prev)
@@ -703,7 +703,7 @@
     // The only use of this method would not pass the root of the
     // tree (as indicated by the assertion above that the tree list
     // has a parent) but the specification does not explicitly exclude the
-    // passing of the root so accomodate it.
+    // passing of the root so accommodate it.
     set_root(NULL);
   }
   debug_only(
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -322,7 +322,7 @@
   void       set_tree_hints(void);
   // Reset statistics for all the lists in the tree.
   void       clear_tree_census(void);
-  // Print the statistcis for all the lists in the tree.  Also may
+  // Print the statistics for all the lists in the tree.  Also may
   // print out summaries.
   void       print_dict_census(void) const;
   void       print_free_lists(outputStream* st) const;
--- a/hotspot/src/share/vm/memory/blockOffsetTable.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/blockOffsetTable.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -590,7 +590,7 @@
 
   // Otherwise, find the block start using the table, but taking
   // care (cf block_start_unsafe() above) not to parse any objects/blocks
-  // on the cards themsleves.
+  // on the cards themselves.
   size_t index = _array->index_for(addr);
   assert(_array->address_for_index(index) == addr,
          "arg should be start of card");
--- a/hotspot/src/share/vm/memory/blockOffsetTable.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/blockOffsetTable.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -424,7 +424,7 @@
     BlockOffsetArray(array, mr, false),
     _unallocated_block(_bottom) { }
 
-  // accessor
+  // Accessor
   HeapWord* unallocated_block() const {
     assert(BlockOffsetArrayUseUnallocatedBlock,
            "_unallocated_block is not being maintained");
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -98,7 +98,7 @@
                                   "card marking array");
   }
 
-  // The assember store_check code will do an unsigned shift of the oop,
+  // The assembler store_check code will do an unsigned shift of the oop,
   // then add it to byte_map_base, i.e.
   //
   //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
@@ -243,7 +243,7 @@
   if (new_region.word_size() != old_region.word_size()) {
     // Commit new or uncommit old pages, if necessary.
     MemRegion cur_committed = _committed[ind];
-    // Extend the end of this _commited region
+    // Extend the end of this _committed region
     // to cover the end of any lower _committed regions.
     // This forms overlapping regions, but never interior regions.
     HeapWord* const max_prev_end = largest_prev_committed_end(ind);
@@ -448,7 +448,7 @@
     // off parallelism is used, then active_workers can be used in
     // place of n_par_threads.
     //  This is an example of a path where n_par_threads is
-    // set to 0 to turn off parallism.
+    // set to 0 to turn off parallelism.
     //  [7] CardTableModRefBS::non_clean_card_iterate()
     //  [8] CardTableRS::younger_refs_in_space_iterate()
     //  [9] Generation::younger_refs_in_space_iterate()
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -590,7 +590,7 @@
       // Then, the case analysis above reveals that, in the worst case,
       // any such stale card will be scanned unnecessarily at most twice.
       //
-      // It is nonethelss advisable to try and get rid of some of this
+      // It is nonetheless advisable to try and get rid of some of this
       // redundant work in a subsequent (low priority) re-design of
       // the card-scanning code, if only to simplify the underlying
       // state machine analysis/proof. ysr 1/28/2002. XXX
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -45,7 +45,7 @@
 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
 #endif // INCLUDE_ALL_GCS
 
-// CollectorPolicy methods.
+// CollectorPolicy methods
 
 CollectorPolicy::CollectorPolicy() :
     _space_alignment(0),
@@ -185,7 +185,7 @@
   // other collectors should also be updated to do their own alignment and then
   // this use of lcm() should be removed.
   if (UseLargePages && !UseParallelGC) {
-      // in presence of large pages we have to make sure that our
+      // In the presence of large pages we have to make sure that our
       // alignment is large page aware
       alignment = lcm(os::large_page_size(), alignment);
   }
@@ -193,7 +193,7 @@
   return alignment;
 }
 
-// GenCollectorPolicy methods.
+// GenCollectorPolicy methods
 
 GenCollectorPolicy::GenCollectorPolicy() :
     _min_gen0_size(0),
@@ -375,10 +375,10 @@
     _initial_heap_byte_size = InitialHeapSize;
   }
 
-  // adjust max heap size if necessary
+  // Adjust NewSize and OldSize or MaxHeapSize to match each other
   if (NewSize + OldSize > MaxHeapSize) {
     if (_max_heap_size_cmdline) {
-      // somebody set a maximum heap size with the intention that we should not
+      // Somebody has set a maximum heap size with the intention that we should not
       // exceed it. Adjust New/OldSize as necessary.
       uintx calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
@@ -439,9 +439,8 @@
   // minimum gen0 sizes.
 
   if (_max_heap_byte_size == _min_heap_byte_size) {
-    // The maximum and minimum heap sizes are the same so
-    // the generations minimum and initial must be the
-    // same as its maximum.
+    // The maximum and minimum heap sizes are the same so the generation's
+    // minimum and initial sizes must be the same as its maximum.
     _min_gen0_size = max_new_size;
     _initial_gen0_size = max_new_size;
     _max_gen0_size = max_new_size;
@@ -463,8 +462,7 @@
       // For the case where NewSize is the default, use NewRatio
       // to size the minimum and initial generation sizes.
       // Use the default NewSize as the floor for these values.  If
-      // NewRatio is overly large, the resulting sizes can be too
-      // small.
+      // NewRatio is overly large, the resulting sizes can be too small.
       _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
       desired_new_size =
         MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
@@ -483,8 +481,7 @@
     _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
 
     // At this point all three sizes have been checked against the
-    // maximum sizes but have not been checked for consistency
-    // among the three.
+    // maximum sizes but have not been checked for consistency among the three.
 
     // Final check min <= initial <= max
     _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
@@ -492,7 +489,7 @@
     _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
   }
 
-  // Write back to flags if necessary
+  // Write back to flags if necessary.
   if (NewSize != _initial_gen0_size) {
     FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
   }
@@ -538,7 +535,7 @@
 }
 
 // Minimum sizes of the generations may be different than
-// the initial sizes.  An inconsistently is permitted here
+// the initial sizes.  An inconsistency is permitted here
 // in the total size that can be specified explicitly by
 // command line specification of OldSize and NewSize and
 // also a command line specification of -Xms.  Issue a warning
@@ -550,12 +547,12 @@
   // At this point the minimum, initial and maximum sizes
   // of the overall heap and of gen0 have been determined.
   // The maximum gen1 size can be determined from the maximum gen0
-  // and maximum heap size since no explicit flags exits
+  // and maximum heap size since no explicit flags exist
   // for setting the gen1 maximum.
   _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment);
 
   // If no explicit command line flag has been set for the
-  // gen1 size, use what is left for gen1.
+  // gen1 size, use what is left for gen1
   if (!FLAG_IS_CMDLINE(OldSize)) {
     // The user has not specified any value but the ergonomics
     // may have chosen a value (which may or may not be consistent
@@ -567,14 +564,14 @@
     // _max_gen1_size has already been made consistent above
     FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
   } else {
-    // It's been explicitly set on the command line.  Use the
+    // OldSize has been explicitly set on the command line. Use the
     // OldSize and then determine the consequences.
     _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
     _initial_gen1_size = OldSize;
 
     // If the user has explicitly set an OldSize that is inconsistent
     // with other command line flags, issue a warning.
-    // The generation minimums and the overall heap mimimum should
+    // The generation minimums and the overall heap minimum should
     // be within one generation alignment.
     if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
       warning("Inconsistency between minimum heap size and minimum "
@@ -596,7 +593,7 @@
               _min_gen0_size, _initial_gen0_size, _max_gen0_size);
       }
     }
-    // Initial size
+    // The same as above for the old gen initial size.
     if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                           _initial_heap_byte_size)) {
       if (PrintGCDetails && Verbose) {
@@ -606,10 +603,10 @@
       }
     }
   }
-  // Enforce the maximum gen1 size.
+
   _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
 
-  // Check that min gen1 <= initial gen1 <= max gen1
+  // Make sure that min gen1 <= initial gen1 <= max gen1.
   _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
   _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
 
@@ -650,10 +647,9 @@
 
   HeapWord* result = NULL;
 
-  // Loop until the allocation is satisified,
-  // or unsatisfied after GC.
+  // Loop until the allocation is satisfied, or unsatisfied after GC.
   for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
-    HandleMark hm; // discard any handles allocated in each iteration
+    HandleMark hm; // Discard any handles allocated in each iteration.
 
     // First allocation attempt is lock-free.
     Generation *gen0 = gch->get_gen(0);
@@ -666,7 +662,7 @@
         return result;
       }
     }
-    unsigned int gc_count_before;  // read inside the Heap_lock locked region
+    unsigned int gc_count_before;  // Read inside the Heap_lock locked region.
     {
       MutexLocker ml(Heap_lock);
       if (PrintGC && Verbose) {
@@ -685,19 +681,19 @@
 
       if (GC_locker::is_active_and_needs_gc()) {
         if (is_tlab) {
-          return NULL;  // Caller will retry allocating individual object
+          return NULL;  // Caller will retry allocating individual object.
         }
         if (!gch->is_maximal_no_gc()) {
-          // Try and expand heap to satisfy request
+          // Try and expand heap to satisfy request.
           result = expand_heap_and_allocate(size, is_tlab);
-          // result could be null if we are out of space
+          // Result could be null if we are out of space.
           if (result != NULL) {
             return result;
           }
         }
 
         if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
-          return NULL; // we didn't get to do a GC and we didn't get any memory
+          return NULL; // We didn't get to do a GC and we didn't get any memory.
         }
 
         // If this thread is not in a jni critical section, we stall
@@ -732,7 +728,7 @@
       result = op.result();
       if (op.gc_locked()) {
          assert(result == NULL, "must be NULL if gc_locked() is true");
-         continue;  // retry and/or stall as necessary
+         continue;  // Retry and/or stall as necessary.
       }
 
       // Allocation has failed and a collection
@@ -793,7 +789,7 @@
     if (!gch->is_maximal_no_gc()) {
       result = expand_heap_and_allocate(size, is_tlab);
     }
-    return result;   // could be null if we are out of space
+    return result;   // Could be null if we are out of space.
   } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
     // Do an incremental collection.
     gch->do_collection(false            /* full */,
@@ -915,10 +911,8 @@
                                        GCCause::_metadata_GC_threshold);
     VMThread::execute(&op);
 
-    // If GC was locked out, try again.  Check
-    // before checking success because the prologue
-    // could have succeeded and the GC still have
-    // been locked out.
+    // If GC was locked out, try again. Check before checking success because the
+    // prologue could have succeeded and the GC still have been locked out.
     if (op.gc_locked()) {
       continue;
     }
@@ -979,7 +973,7 @@
 }
 
 void MarkSweepPolicy::initialize_gc_policy_counters() {
-  // initialize the policy counters - 2 collectors, 3 generations
+  // Initialize the policy counters - 2 collectors, 3 generations.
   if (UseParNewGC) {
     _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
   } else {
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -76,10 +76,10 @@
   size_t _heap_alignment;
 
   // Needed to keep information if MaxHeapSize was set on the command line
-  // when the flag value is aligned etc by ergonomics
+  // when the flag value is aligned etc by ergonomics.
   bool _max_heap_size_cmdline;
 
-  // The sizing of the heap are controlled by a sizing policy.
+  // The sizing of the heap is controlled by a sizing policy.
   AdaptiveSizePolicy* _size_policy;
 
   // Set to true when policy wants soft refs cleared.
@@ -102,7 +102,7 @@
     initialize_size_info();
   }
 
-  // Return maximum heap alignment that may be imposed by the policy
+  // Return maximum heap alignment that may be imposed by the policy.
   static size_t compute_heap_alignment();
 
   size_t space_alignment()        { return _space_alignment; }
@@ -180,7 +180,7 @@
                                                        size_t size,
                                                        Metaspace::MetadataType mdtype);
 
-  // Performace Counter support
+  // Performance Counter support
   GCPolicyCounters* counters()     { return _gc_policy_counters; }
 
   // Create the jstat counters for the GC policy.  By default, policy's
@@ -231,9 +231,8 @@
 
   GenerationSpec **_generations;
 
-  // Return true if an allocation should be attempted in the older
-  // generation if it fails in the younger generation.  Return
-  // false, otherwise.
+  // Return true if an allocation should be attempted in the older generation
+  // if it fails in the younger generation.  Return false, otherwise.
   virtual bool should_try_older_generation_allocation(size_t word_size) const;
 
   void initialize_flags();
@@ -245,7 +244,7 @@
   // Try to allocate space by expanding the heap.
   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 
-  // Compute max heap alignment
+  // Compute max heap alignment.
   size_t compute_max_alignment();
 
  // Scale the base_size by NewRatio according to
@@ -253,7 +252,7 @@
  // and align by min_alignment()
  size_t scale_by_NewRatio_aligned(size_t base_size);
 
- // Bound the value by the given maximum minus the min_alignment
+ // Bound the value by the given maximum minus the min_alignment.
  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
 
  public:
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -126,7 +126,7 @@
                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   // It is important to do this in a way such that concurrent readers can't
-  // temporarily think somethings in the heap.  (Seen this happen in asserts.)
+  // temporarily think something is in the heap.  (Seen this happen in asserts.)
   _reserved.set_word_size(0);
   _reserved.set_start((HeapWord*)heap_rs.base());
   size_t actual_heap_size = heap_rs.size();
@@ -1262,7 +1262,7 @@
 };
 
 jlong GenCollectedHeap::millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   GenTimeOfLastGCClosure tolgc_cl(now);
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -315,7 +315,7 @@
   }
 
   // Update the gc statistics for each generation.
-  // "level" is the level of the lastest collection
+  // "level" is the level of the latest collection.
   void update_gc_stats(int current_level, bool full) {
     for (int i = 0; i < _n_gens; i++) {
       _gens[i]->update_gc_stats(current_level, full);
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -148,8 +148,8 @@
   Universe::update_heap_info_at_gc();
 
   // Update time of last gc for all generations we collected
-  // (which curently is all the generations in the heap).
-  // We need to use a monotonically non-deccreasing time in ms
+  // (which currently is all the generations in the heap).
+  // We need to use a monotonically non-decreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
--- a/hotspot/src/share/vm/memory/genRemSet.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/genRemSet.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -27,7 +27,7 @@
 
 #include "oops/oop.hpp"
 
-// A GenRemSet provides ways of iterating over pointers accross generations.
+// A GenRemSet provides ways of iterating over pointers across generations.
 // (This is especially useful for older-to-younger.)
 
 class Generation;
@@ -58,7 +58,7 @@
 
   // These are for dynamic downcasts.  Unfortunately it names the
   // possible subtypes (but not that they are subtypes!)  Return NULL if
-  // the cast is invalide.
+  // the cast is invalid.
   virtual CardTableRS* as_CardTableRS() { return NULL; }
 
   // Return the barrier set associated with "this."
--- a/hotspot/src/share/vm/memory/generation.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/generation.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -289,7 +289,7 @@
 
   // These functions return the addresses of the fields that define the
   // boundaries of the contiguous allocation area.  (These fields should be
-  // physicall near to one another.)
+  // physically near to one another.)
   virtual HeapWord** top_addr() const { return NULL; }
   virtual HeapWord** end_addr() const { return NULL; }
 
@@ -485,7 +485,7 @@
   // General signature...
   virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
   // ...and specializations for de-virtualization.  (The general
-  // implemention of the _nv versions call the virtual version.
+  // implementation of the _nv versions calls the virtual version.
   // Note that the _nv suffix is not really semantically necessary,
   // but it avoids some not-so-useful warnings on Solaris.)
 #define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)             \
--- a/hotspot/src/share/vm/memory/heap.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/heap.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -183,7 +183,7 @@
   size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 
-  // First check if we can satify request from freelist
+  // First check if we can satisfy request from freelist
   debug_only(verify());
   HeapBlock* block = search_freelist(number_of_segments, is_critical);
   debug_only(if (VerifyCodeCacheOften) verify());
@@ -372,7 +372,7 @@
   }
 
   // Scan for right place to put into list. List
-  // is sorted by increasing addresseses
+  // is sorted by increasing addresses
   FreeBlock* prev = NULL;
   FreeBlock* cur  = _freelist;
   while(cur != NULL && cur < b) {
--- a/hotspot/src/share/vm/memory/heap.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/heap.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -127,8 +127,8 @@
   // Heap extents
   bool  reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
   void  release();                               // releases all allocated memory
-  bool  expand_by(size_t size);                  // expands commited memory by size
-  void  shrink_by(size_t size);                  // shrinks commited memory by size
+  bool  expand_by(size_t size);                  // expands committed memory by size
+  void  shrink_by(size_t size);                  // shrinks committed memory by size
   void  clear();                                 // clears all heap contents
 
   // Memory allocation
--- a/hotspot/src/share/vm/memory/heapInspection.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/heapInspection.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -347,7 +347,7 @@
 
 #endif // INCLUDE_SERVICES
 
-// These declarations are needed since teh declaration of KlassInfoTable and
+// These declarations are needed since the declaration of KlassInfoTable and
 // KlassInfoClosure are guarded by #if INCLUDE_SERVICES
 class KlassInfoTable;
 class KlassInfoClosure;
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -2399,7 +2399,7 @@
 
 void SpaceManager::verify() {
   // If there are blocks in the dictionary, then
-  // verfication of chunks does not work since
+  // verification of chunks does not work since
   // being in the dictionary alters a chunk.
   if (block_freelists()->total_size() == 0) {
     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
@@ -2868,7 +2868,7 @@
     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
     // If compressed class space fits in lower 32G, we don't need a base.
     if (higher_address <= (address)klass_encoding_max) {
-      lower_base = 0; // effectively lower base is zero.
+      lower_base = 0; // Effectively lower base is zero.
     }
   }
 
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -487,7 +487,7 @@
   NOT_PRODUCT(SystemDictionary::verify();)
 
   // Copy the symbol table and the system dictionary to the shared
-  // space in usable form.  Copy the hastable
+  // space in usable form.  Copy the hashtable
   // buckets first [read-write], then copy the linked lists of entries
   // [read-only].
 
@@ -953,7 +953,7 @@
 
   // The following data in the shared misc data region are the linked
   // list elements (HashtableEntry objects) for the symbol table, string
-  // table, and shared dictionary.  The heap objects refered to by the
+  // table, and shared dictionary.  The heap objects referred to by the
   // symbol table, string table, and shared dictionary are permanent and
   // unmovable.  Since new entries added to the string and symbol tables
   // are always added at the beginning of the linked lists, THESE LINKED
--- a/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -72,7 +72,7 @@
   bool has_read_region_opt() { return false; }
 
 
-  // These operations should assert false unless the correponding operation
+  // These operations should assert false unless the corresponding operation
   // above returns true.
   void read_ref_array(MemRegion mr) {
     assert(false, "can't call");
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -45,7 +45,7 @@
 }
 
 void ReferenceProcessor::init_statics() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 
@@ -152,7 +152,7 @@
   // Update (advance) the soft ref master clock field. This must be done
   // after processing the soft ref list.
 
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
@@ -168,7 +168,7 @@
   // javaTimeNanos(), which is guaranteed to be monotonically
   // non-decreasing provided the underlying platform provides such
   // a time source (and it is bug free).
-  // In product mode, however, protect ourselves from non-monotonicty.
+  // In product mode, however, protect ourselves from non-monotonicity.
   if (now > _soft_ref_timestamp_clock) {
     _soft_ref_timestamp_clock = now;
     java_lang_ref_SoftReference::set_clock(now);
@@ -349,7 +349,7 @@
 
   oop obj = NULL;
   oop next_d = refs_list.head();
-  if (pending_list_uses_discovered_field()) { // New behaviour
+  if (pending_list_uses_discovered_field()) { // New behavior
     // Walk down the list, self-looping the next field
     // so that the References are not considered active.
     while (obj != next_d) {
@@ -366,7 +366,7 @@
       // Post-barrier not needed when looping to self.
       java_lang_ref_Reference::set_next_raw(obj, obj);
       if (next_d == obj) {  // obj is last
-        // Swap refs_list into pendling_list_addr and
+        // Swap refs_list into pending_list_addr and
         // set obj's discovered to what we read from pending_list_addr.
         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
         // Need post-barrier on pending_list_addr above;
@@ -376,7 +376,7 @@
         oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
       }
     }
-  } else { // Old behaviour
+  } else { // Old behavior
     // Walk down the list, copying the discovered field into
     // the next field and clearing the discovered field.
     while (obj != next_d) {
@@ -390,7 +390,7 @@
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "The reference should not be enqueued");
       if (next_d == obj) {  // obj is last
-        // Swap refs_list into pendling_list_addr and
+        // Swap refs_list into pending_list_addr and
         // set obj's next to what we read from pending_list_addr.
         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
         // Need oop_check on pending_list_addr above;
@@ -1341,7 +1341,7 @@
 // whose referents are still alive, whose referents are NULL or which
 // are not active (have a non-NULL next field). NOTE: When we are
 // thus precleaning the ref lists (which happens single-threaded today),
-// we do not disable refs discovery to honour the correct semantics of
+// we do not disable refs discovery to honor the correct semantics of
 // java.lang.Reference. As a result, we need to be careful below
 // that ref removal steps interleave safely with ref discovery steps
 // (in this thread).
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -474,7 +474,7 @@
   bool processing_is_mt() const { return _processing_is_mt; }
   void set_mt_processing(bool mt) { _processing_is_mt = mt; }
 
-  // whether all enqueuing of weak references is complete
+  // whether all enqueueing of weak references is complete
   bool enqueuing_is_done()  { return _enqueuing_is_done; }
   void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }
 
--- a/hotspot/src/share/vm/memory/resourceArea.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/resourceArea.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -196,7 +196,7 @@
 // leveraging existing data structures if we simply create a way to manage this one
 // special need for a ResourceMark. If ResourceMark simply inherited from CHeapObj
 // then existing ResourceMarks would work fine since no one uses new to allocate them
-// and they would be stack allocated. This leaves open the possibilty of accidental
+// and they would be stack allocated. This leaves open the possibility of accidental
 // misuse so we simply duplicate the ResourceMark functionality here.
 
 class DeoptResourceMark: public CHeapObj<mtInternal> {
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -92,7 +92,7 @@
 //  0 is a "special" value in set_n_threads() which translates to
 //  setting _n_threads to 1.
 //
-//  Some code uses _n_terminiation to decide if work should be done in
+//  Some code uses _n_termination to decide if work should be done in
 //  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
 //  is an example of such code.  Look for variable "is_par" for other
 //  examples.
--- a/hotspot/src/share/vm/memory/space.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -112,7 +112,7 @@
   // cards are processed. For instance, CMS must remember mutator updates
   // (i.e. dirty cards) so as to re-scan mutated objects.
   // Such work can be piggy-backed here on dirty card scanning, so as to make
-  // it slightly more efficient than doing a complete non-detructive pre-scan
+  // it slightly more efficient than doing a complete non-destructive pre-scan
   // of the card table.
   MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
   if (pCl != NULL) {
@@ -324,8 +324,8 @@
 }
 
 void OffsetTableContigSpace::set_end(HeapWord* new_end) {
-  // Space should not advertize an increase in size
-  // until after the underlying offest table has been enlarged.
+  // Space should not advertise an increase in size
+  // until after the underlying offset table has been enlarged.
   _offsets.resize(pointer_delta(new_end, bottom()));
   Space::set_end(new_end);
 }
@@ -729,7 +729,7 @@
   object_iterate_from(bm, blk);
 }
 
-// For a continguous space object_iterate() and safe_object_iterate()
+// For a ContiguousSpace object_iterate() and safe_object_iterate()
 // are the same.
 void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
--- a/hotspot/src/share/vm/memory/space.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -56,7 +56,7 @@
 
 // Here's the Space hierarchy:
 //
-// - Space               -- an asbtract base class describing a heap area
+// - Space               -- an abstract base class describing a heap area
 //   - CompactibleSpace  -- a space supporting compaction
 //     - CompactibleFreeListSpace -- (used for CMS generation)
 //     - ContiguousSpace -- a compactible space in which all free space
@@ -159,7 +159,7 @@
   // (that is, if the space is contiguous), then this region must contain only
   // such objects: the memregion will be from the bottom of the region to the
   // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
-  // the space must distiguish between objects in the region allocated before
+  // the space must distinguish between objects in the region allocated before
   // and after the call to save marks.
   virtual MemRegion used_region_at_save_marks() const {
     return MemRegion(bottom(), saved_mark_word());
@@ -190,7 +190,7 @@
 
   // Returns true iff the given space contains the
   // given address as part of an allocated object. For
-  // ceratin kinds of spaces, this might be a potentially
+  // certain kinds of spaces, this might be a potentially
   // expensive operation. To prevent performance problems
   // on account of its inadvertent use in product jvm's,
   // we restrict its use to assertion checks only.
@@ -244,13 +244,13 @@
   // Return an address indicating the extent of the iteration in the
   // event that the iteration had to return because of finding an
   // uninitialized object in the space, or if the closure "cl"
-  // signalled early termination.
+  // signaled early termination.
   virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
   virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                              ObjectClosureCareful* cl);
 
   // Create and return a new dirty card to oop closure. Can be
-  // overriden to return the appropriate type of closure
+  // overridden to return the appropriate type of closure
   // depending on the type of space in which the closure will
   // operate. ResourceArea allocated.
   virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
@@ -474,13 +474,13 @@
   // be one, since compaction must succeed -- we go to the first space of
   // the previous generation if necessary, updating "cp"), reset compact_top
   // and then forward.  In either case, returns the new value of "compact_top".
-  // If the forwarding crosses "cp->threshold", invokes the "cross_threhold"
+  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
   // function of the then-current compaction space, and updates "cp->threshold"
   // accordingly.
   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                     HeapWord* compact_top);
 
-  // Return a size with adjusments as required of the space.
+  // Return a size with adjustments as required of the space.
   virtual size_t adjust_object_size_v(size_t size) const { return size; }
 
 protected:
@@ -500,7 +500,7 @@
 
   // Requires "allowed_deadspace_words > 0", that "q" is the start of a
   // free block of the given "word_len", and that "q", were it an object,
-  // would not move if forwared.  If the size allows, fill the free
+  // would not move if forwarded.  If the size allows, fill the free
   // block with an object, to prevent excessive compaction.  Returns "true"
   // iff the free region was made deadspace, and modifies
   // "allowed_deadspace_words" to reflect the number of available deadspace
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -135,7 +135,7 @@
                     free());
     }
   }
-  // If we had to expand to accomodate promotions from younger generations
+  // If we had to expand to accommodate promotions from younger generations
   if (!result && _capacity_at_prologue < capacity()) {
     result = true;
     if (PrintGC && Verbose) {
--- a/hotspot/src/share/vm/memory/universe.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/memory/universe.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -1136,7 +1136,7 @@
       SystemDictionary::ProtectionDomain_klass(), m);;
   }
 
-  // The folowing is initializing converter functions for serialization in
+  // The following is initializing converter functions for serialization in
   // JVM.cpp. If we clean up the StrictMath code above we may want to find
   // a better solution for this as well.
   initialize_converter_functions();
@@ -1178,7 +1178,7 @@
   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
 
   // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
+  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
   KlassDepChange changes(dependee);
@@ -1199,7 +1199,7 @@
   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
 
   // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
+  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
   CallSiteDepChange changes(call_site(), method_handle());
@@ -1230,7 +1230,7 @@
   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
 
   // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
+  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
   // Compute the dependent nmethods
--- a/hotspot/src/share/vm/oops/method.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/oops/method.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -38,13 +38,11 @@
 #include "utilities/accessFlags.hpp"
 #include "utilities/growableArray.hpp"
 
-// A Method* represents a Java method.
+// A Method represents a Java method.
 //
 // Memory layout (each line represents a word). Note that most applications load thousands of methods,
 // so keeping the size of this structure small has a big impact on footprint.
 //
-// We put all oops and method_size first for better gc cache locality.
-//
 // The actual bytecodes are inlined after the end of the Method struct.
 //
 // There are bits in the access_flags telling whether inlined tables are present.
@@ -64,17 +62,17 @@
 // | header                                               |
 // | klass                                                |
 // |------------------------------------------------------|
-// | ConstMethod*                   (oop)                 |
+// | ConstMethod*                   (metadata)            |
 // |------------------------------------------------------|
-// | methodData                     (oop)                 |
-// | methodCounters                                       |
+// | MethodData*                    (metadata)            |
+// | MethodCounters                                       |
 // |------------------------------------------------------|
 // | access_flags                                         |
 // | vtable_index                                         |
 // |------------------------------------------------------|
 // | result_index (C++ interpreter only)                  |
 // |------------------------------------------------------|
-// | method_size             |   intrinsic_id|   flags    |
+// | method_size             | intrinsic_id  |   flags    |
 // |------------------------------------------------------|
 // | code                           (pointer)             |
 // | i2i                            (pointer)             |
--- a/hotspot/src/share/vm/opto/runtime.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -568,8 +568,7 @@
 const TypeFunc *OptoRuntime::uncommon_trap_Type() {
   // create input type (domain)
   const Type **fields = TypeTuple::fields(1);
-  // Symbol* name of class to be loaded
-  fields[TypeFunc::Parms+0] = TypeInt::INT;
+  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
 
   // create result type (range)
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -306,7 +306,7 @@
  *    profiling can start at level 0 and finish at level 3.
  *
  * b. 0 -> 2 -> 3 -> 4.
- *    This case occures when the load on C2 is deemed too high. So, instead of transitioning
+ *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
  *    into state 3 directly and over-profiling while a method is in the C2 queue we transition to
  *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
  *
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -178,7 +178,7 @@
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(),  true));
 
-  // following are JVMTI agent writeable properties.
+  // Following are JVMTI agent writable properties.
   // Property values are set to NULL; they are OS specific and
   // are initialized in os::init_system_properties_values().
   _java_ext_dirs = new SystemProperty("java.ext.dirs", NULL,  true);
@@ -1306,7 +1306,7 @@
   if (!FLAG_IS_DEFAULT(OldPLABSize)) {
     if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
       // OldPLABSize is not the default value but CMSParPromoteBlocksToClaim
-      // is.  In this situtation let CMSParPromoteBlocksToClaim follow
+      // is.  In this situation let CMSParPromoteBlocksToClaim follow
       // the value (either from the command line or ergonomics) of
       // OldPLABSize.  Following OldPLABSize is an ergonomics decision.
       FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
@@ -3666,18 +3666,18 @@
   assert(verify_serial_gc_flags(), "SerialGC unset");
 #endif // INCLUDE_ALL_GCS
 
-  // Initialize Metaspace flags and alignments.
+  // Initialize Metaspace flags and alignments
   Metaspace::ergo_initialize();
 
   // Set bytecode rewriting flags
   set_bytecode_flags();
 
-  // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled.
+  // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled
   set_aggressive_opts_flags();
 
   // Turn off biased locking for locking debug mode flags,
-  // which are subtlely different from each other but neither works with
-  // biased locking.
+  // which are subtly different from each other but neither works with
+  // biased locking
   if (UseHeavyMonitors
 #ifdef COMPILER1
       || !UseFastLocking
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -444,7 +444,7 @@
   static bool check_gc_consistency();
   static void check_deprecated_gcs();
   static void check_deprecated_gc_flags();
-  // Check consistecy or otherwise of VM argument settings
+  // Check consistency or otherwise of VM argument settings
   static bool check_vm_args_consistency();
   // Check stack pages settings
   static bool check_stack_pages();
@@ -494,7 +494,7 @@
   // -Xprof
   static bool has_profile()                 { return _has_profile; }
 
-  // -Xms, -Xmx
+  // -Xms
   static uintx min_heap_size()              { return _min_heap_size; }
   static void  set_min_heap_size(uintx v)   { _min_heap_size = v;  }
 
--- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -233,7 +233,7 @@
 }
 
 void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
-  // Delay next back-branch event but pump up invocation counter to triger
+  // Delay next back-branch event but pump up invocation counter to trigger
   // whole method compilation.
   MethodCounters* mcs = m->method_counters();
   assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
@@ -251,7 +251,7 @@
 //
 // CounterDecay
 //
-// Interates through invocation counters and decrements them. This
+// Iterates through invocation counters and decrements them. This
 // is done at each safepoint.
 //
 class CounterDecay : public AllStatic {
@@ -321,7 +321,7 @@
 }
 
 // This method can be called by any component of the runtime to notify the policy
-// that it's recommended to delay the complation of this method.
+// that it's recommended to delay the compilation of this method.
 void NonTieredCompPolicy::delay_compilation(Method* method) {
   MethodCounters* mcs = method->method_counters();
   if (mcs != NULL) {
--- a/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -72,7 +72,7 @@
   // reprofile request
   virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
   // delay_compilation(method) can be called by any component of the runtime to notify the policy
-  // that it's recommended to delay the complation of this method.
+  // that it's recommended to delay the compilation of this method.
   virtual void delay_compilation(Method* method) = 0;
   // disable_compilation() is called whenever the runtime decides to disable compilation of the
   // specified method.
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -380,7 +380,7 @@
   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
 
-  // It's possible that the number of paramters at the call site is
+  // It's possible that the number of parameters at the call site is
   // different from the number of arguments in the callee when method
   // handles are used.  If the caller is interpreted, get the real
   // value so that the proper amount of space can be added to its
@@ -540,7 +540,7 @@
     // popframe condition bit set, we should always clear it now
     thread->clear_popframe_condition();
 #else
-    // C++ interpeter will clear has_pending_popframe when it enters
+    // C++ interpreter will clear has_pending_popframe when it enters
     // with method_resume. For deopt_resume2 we clear it now.
     if (thread->popframe_forcing_deopt_reexecution())
         thread->clear_popframe_condition();
--- a/hotspot/src/share/vm/runtime/deoptimization.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/deoptimization.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -206,7 +206,7 @@
   // Called by assembly stub after execution has returned to
   // deoptimized frame and after the stack unrolling.
   // @argument thread.     Thread where stub_frame resides.
-  // @argument exec_mode.  Determines how execution should be continuted in top frame.
+  // @argument exec_mode.  Determines how execution should be continued in top frame.
   //                       0 means continue after current byte code
   //                       1 means exception has happened, handle exception
   //                       2 means reexecute current bytecode (for uncommon traps).
--- a/hotspot/src/share/vm/runtime/frame.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -649,7 +649,7 @@
 #endif
 }
 
-// Return whether the frame is in the VM or os indicating a Hotspot problem.
+// Print whether the frame is in the VM or OS indicating a HotSpot problem.
 // Otherwise, it's likely a bug in the native library that the Java code calls,
 // hopefully indicating where to submit bugs.
 void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
@@ -928,7 +928,7 @@
     // klass, and the klass needs to be kept alive while executing. The GCs
     // don't trace through method pointers, so typically in similar situations
     // the mirror or the class loader of the klass are installed as a GC root.
-    // To minimze the overhead of doing that here, we ask the GC to pass down a
+    // To minimize the overhead of doing that here, we ask the GC to pass down a
     // closure that knows how to keep klasses alive given a ClassLoaderData.
     cld_f->do_cld(m->method_holder()->class_loader_data());
   }
--- a/hotspot/src/share/vm/runtime/globals.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -3639,7 +3639,7 @@
   product(uintx, MaxDirectMemorySize, 0,                                    \
           "Maximum total size of NIO direct-buffer allocations")            \
                                                                             \
-  /* temporary developer defined flags  */                                  \
+  /* Flags used for temporary code during development  */                   \
                                                                             \
   diagnostic(bool, UseNewCode, false,                                       \
           "Testing Only: Use the new version while testing")                \
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -31,7 +31,7 @@
 
 // Construct enum of Flag_<cmdline-arg> constants.
 
-// Parens left off in the following for the enum decl below.
+// Parentheses left off in the following for the enum decl below.
 #define FLAG_MEMBER(flag) Flag_##flag
 
 #define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
--- a/hotspot/src/share/vm/runtime/handles.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/handles.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -267,7 +267,7 @@
 // HandleMarks manually.
 //
 // A HandleMark constructor will record the current handle area top, and the
-// desctructor will reset the top, destroying all handles allocated in between.
+// destructor will reset the top, destroying all handles allocated in between.
 // The following code will therefore NOT work:
 //
 //   Handle h;
--- a/hotspot/src/share/vm/runtime/javaCalls.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/javaCalls.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -302,7 +302,7 @@
   // Check if we need to wrap a potential OS exception handler around thread
   // This is used for e.g. Win32 structured exception handlers
   assert(THREAD->is_Java_thread(), "only JavaThreads can make JavaCalls");
-  // Need to wrap each and everytime, since there might be native code down the
+  // Need to wrap each and every time, since there might be native code down the
   // stack that has installed its own exception handlers
   os::os_exception_wrapper(call_helper, result, &method, args, THREAD);
 }
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -461,7 +461,7 @@
     // Append new block
     Thread* thread = Thread::current();
     Handle obj_handle(thread, obj);
-    // This can block, so we need to preserve obj accross call.
+    // This can block, so we need to preserve obj across call.
     _last->_next = JNIHandleBlock::allocate_block(thread);
     _last = _last->_next;
     _allocate_before_rebuild--;
@@ -528,7 +528,7 @@
   return result;
 }
 
-// This method is not thread-safe, i.e., must be called whule holding a lock on the
+// This method is not thread-safe, i.e., must be called while holding a lock on the
 // structure.
 long JNIHandleBlock::memory_usage() const {
   return length() * sizeof(JNIHandleBlock);
--- a/hotspot/src/share/vm/runtime/jniHandles.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/jniHandles.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -106,7 +106,7 @@
   JNIHandleBlock* _next;                        // Link to next block
 
   // The following instance variables are only used by the first block in a chain.
-  // Having two types of blocks complicates the code and the space overhead in negligble.
+  // Having two types of blocks complicates the code and the space overhead is negligible.
   JNIHandleBlock* _last;                        // Last block in use
   JNIHandleBlock* _pop_frame_link;              // Block to restore on PopLocalFrame call
   oop*            _free_list;                   // Handle free list
--- a/hotspot/src/share/vm/runtime/mutex.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/mutex.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -507,7 +507,7 @@
   _OnDeck = NULL ;
 
   // Note that we current drop the inner lock (clear OnDeck) in the slow-path
-  // epilog immediately after having acquired the outer lock.
+  // epilogue immediately after having acquired the outer lock.
   // But instead we could consider the following optimizations:
   // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
   //    This might avoid potential reacquisition of the inner lock in IUlock().
@@ -931,7 +931,7 @@
 
   check_block_state(Self);
   if (Self->is_Java_thread()) {
-    // Horribile dictu - we suffer through a state transition
+    // Horrible dictu - we suffer through a state transition
     assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
     ThreadBlockInVM tbivm ((JavaThread *) Self) ;
     ILock (Self) ;
@@ -963,7 +963,7 @@
 }
 
 
-// Returns true if thread succeceed [sic] in grabbing the lock, otherwise false.
+// Returns true if thread succeeds in grabbing the lock, otherwise false.
 
 bool Monitor::try_lock() {
   Thread * const Self = Thread::current();
--- a/hotspot/src/share/vm/runtime/mutex.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/mutex.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -90,7 +90,7 @@
   // A special lock: Is a lock where you are guaranteed not to block while you are
   // holding it, i.e., no vm operation can happen, taking other locks, etc.
   // NOTE: It is critical that the rank 'special' be the lowest (earliest)
-  // (except for "event"?) for the deadlock dection to work correctly.
+  // (except for "event"?) for the deadlock detection to work correctly.
   // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
   // which being external to the VM are not subject to deadlock detection.
   // The rank safepoint is used only for synchronization in reaching a
@@ -241,7 +241,7 @@
 //
 // Currently, however, the base object is a monitor.  Monitor contains all the
 // logic for wait(), notify(), etc.   Mutex extends monitor and restricts the
-// visiblity of wait(), notify(), and notify_all().
+// visibility of wait(), notify(), and notify_all().
 //
 // Another viable alternative would have been to have Monitor extend Mutex and
 // implement all the normal mutex and wait()-notify() logic in Mutex base class.
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -43,7 +43,7 @@
 // Mutexes used in the VM.
 
 extern Mutex*   Patching_lock;                   // a lock used to guard code patching of compiled code
-extern Monitor* SystemDictionary_lock;           // a lock on the system dictonary
+extern Monitor* SystemDictionary_lock;           // a lock on the system dictionary
 extern Mutex*   PackageTable_lock;               // a lock on the class loader package table
 extern Mutex*   CompiledIC_lock;                 // a lock used to guard compiled IC patching and access
 extern Mutex*   InlineCacheBuffer_lock;          // a lock used to guard the InlineCacheBuffer
@@ -345,8 +345,8 @@
 //   - reentrant locking
 //   - locking out of order
 //
-// Only too be used for verify code, where we can relaxe out dead-lock
-// dection code a bit (unsafe, but probably ok). This code is NEVER to
+// Only to be used for verify code, where we can relax our dead-lock
+// detection code a bit (unsafe, but probably ok). This code is NEVER to
 // be included in a product version.
 //
 class VerifyMutexLocker: StackObj {
@@ -358,7 +358,7 @@
     _mutex     = mutex;
     _reentrant = mutex->owned_by_self();
     if (!_reentrant) {
-      // We temp. diable strict safepoint checking, while we require the lock
+      // We temp. disable strict safepoint checking, while we require the lock
       FlagSetting fs(StrictSafepointChecks, false);
       _mutex->lock();
     }
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -234,7 +234,7 @@
 // * Taken together, the cxq and the EntryList constitute or form a
 //   single logical queue of threads stalled trying to acquire the lock.
 //   We use two distinct lists to improve the odds of a constant-time
-//   dequeue operation after acquisition (in the ::enter() epilog) and
+//   dequeue operation after acquisition (in the ::enter() epilogue) and
 //   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
 //   A key desideratum is to minimize queue & monitor metadata manipulation
 //   that occurs while holding the monitor lock -- that is, we want to
@@ -677,7 +677,7 @@
         // non-null and elect a new "Responsible" timer thread.
         //
         // This thread executes:
-        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
+        //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
         //    LD cxq|EntryList               (in subsequent exit)
         //
         // Entering threads in the slow/contended path execute:
@@ -2031,7 +2031,7 @@
           TEVENT (Spin abort -- too many spinners) ;
           return 0 ;
        }
-       // Slighty racy, but benign ...
+       // Slightly racy, but benign ...
        Adjust (&_Spinner, 1) ;
     }
 
--- a/hotspot/src/share/vm/runtime/objectMonitor.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/objectMonitor.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -101,7 +101,7 @@
   static int Spinner_offset_in_bytes()     { return offset_of(ObjectMonitor, _Spinner);    }
 
  public:
-  // Eventaully we'll make provisions for multiple callbacks, but
+  // Eventually we'll make provisions for multiple callbacks, but
   // now one will suffice.
   static int (*SpinCallbackFunction)(intptr_t, int) ;
   static intptr_t SpinCallbackArgument ;
@@ -272,7 +272,7 @@
   // type int, or int32_t but not intptr_t.  There's no reason
   // to use 64-bit fields for these variables on a 64-bit JVM.
 
-  volatile intptr_t  _count;        // reference count to prevent reclaimation/deflation
+  volatile intptr_t  _count;        // reference count to prevent reclamation/deflation
                                     // at stop-the-world time.  See deflate_idle_monitors().
                                     // _count is approximately |_WaitSet| + |_EntryList|
  protected:
--- a/hotspot/src/share/vm/runtime/orderAccess.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/orderAccess.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -61,13 +61,13 @@
 //
 // Ensures that Load1 completes before Store2 and any subsequent store
 // operations.  Loads before Load1 may *not* float below Store2 and any
-// subseqeuent store operations.
+// subsequent store operations.
 //
 // StoreLoad:  Store1(s); StoreLoad; Load2
 //
 // Ensures that Store1 completes before Load2 and any subsequent load
 // operations.  Stores before Store1 may *not* float below Load2 and any
-// subseqeuent load operations.
+// subsequent load operations.
 //
 //
 // We define two further operations, 'release' and 'acquire'.  They are
@@ -176,7 +176,7 @@
 // compilers that we currently use (SunStudio, gcc and VC++) respect the
 // semantics of volatile here. If you build HotSpot using other
 // compilers, you may need to verify that no compiler reordering occurs
-// across the sequence point respresented by the volatile access.
+// across the sequence point represented by the volatile access.
 //
 //
 //                os::is_MP Considered Redundant
@@ -311,7 +311,7 @@
  private:
   // This is a helper that invokes the StubRoutines::fence_entry()
   // routine if it exists, It should only be used by platforms that
-  // don't another way to do the inline eassembly.
+  // don't have another way to do the inline assembly.
   static void StubRoutines_fence();
 };
 
--- a/hotspot/src/share/vm/runtime/os.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/os.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -236,7 +236,7 @@
   while (true) {
     int sig;
     {
-      // FIXME : Currently we have not decieded what should be the status
+      // FIXME : Currently we have not decided what should be the status
       //         for this java thread blocked here. Once we decide about
       //         that we should fix this.
       sig = os::signal_wait();
@@ -583,7 +583,7 @@
   ptrdiff_t size = *size_addr_from_base(start_of_prev_block);
   u_char* obj = start_of_prev_block + space_before;
   if (size <= 0 ) {
-    // start is bad; mayhave been confused by OS data inbetween objects
+    // start is bad; may have been confused by OS data in between objects
     // search one more backwards
     start_of_prev_block = find_cushion_backwards(start_of_prev_block);
     size = *size_addr_from_base(start_of_prev_block);
@@ -1011,7 +1011,7 @@
   if (Universe::heap()->is_in(addr)) {
     HeapWord* p = Universe::heap()->block_start(addr);
     bool print = false;
-    // If we couldn't find it it just may mean that heap wasn't parseable
+    // If we couldn't find it, it just may mean that the heap wasn't parsable
     // See if we were just given an oop directly
     if (p != NULL && Universe::heap()->block_is_obj(p)) {
       print = true;
@@ -1446,7 +1446,7 @@
 // >= 2 physical CPU's and >=2GB of memory, with some fuzz
 // because the graphics memory (?) sometimes masks physical memory.
 // If you want to change the definition of a server class machine
-// on some OS or platform, e.g., >=4GB on Windohs platforms,
+// on some OS or platform, e.g., >=4GB on Windows platforms,
 // then you'll have to parameterize this method based on that state,
 // as was done for logical processors here, or replicate and
 // specialize this method for each platform.  (Or fix os to have
--- a/hotspot/src/share/vm/runtime/os.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/os.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -395,7 +395,7 @@
     // was equal.  However, some platforms mask off faulting addresses
     // to the page size, so now we just check that the address is
     // within the page.  This makes the thread argument unnecessary,
-    // but we retain the NULL check to preserve existing behaviour.
+    // but we retain the NULL check to preserve existing behavior.
     if (thread == NULL) return false;
     address page = (address) _mem_serialize_page;
     return addr >= page && addr < (page + os::vm_page_size());
@@ -540,7 +540,7 @@
 
   // Loads .dll/.so and
   // in case of error it checks if .dll/.so was built for the
-  // same architecture as Hotspot is running on
+  // same architecture as HotSpot is running on
   static void* dll_load(const char *name, char *ebuf, int ebuflen);
 
   // lookup symbol in a shared library
--- a/hotspot/src/share/vm/runtime/park.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/park.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -152,7 +152,7 @@
 
 // 6399321 As a temporary measure we copied & modified the ParkEvent::
 // allocate() and release() code for use by Parkers.  The Parker:: forms
-// will eventually be removed as we consolide and shift over to ParkEvents
+// will eventually be removed as we consolidate and shift over to ParkEvents
 // for both builtin synchronization and JSR166 operations.
 
 volatile int Parker::ListLock = 0 ;
--- a/hotspot/src/share/vm/runtime/perfData.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/perfData.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -39,7 +39,7 @@
 PerfDataList*   PerfDataManager::_constants = NULL;
 
 /*
- * The jvmstat global and subsysem jvmstat counter name spaces. The top
+ * The jvmstat global and subsystem jvmstat counter name spaces. The top
  * level name spaces imply the interface stability level of the counter,
  * which generally follows the Java package, class, and property naming
  * conventions. The CounterNS enumeration values should be used to index
--- a/hotspot/src/share/vm/runtime/perfData.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/perfData.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -116,7 +116,7 @@
  *
  * A PerfData subtype is not required to provide an implementation for
  * each variability classification. For example, the String type provides
- * Variable and Constant variablility classifications in the PerfStringVariable
+ * Variable and Constant variability classifications in the PerfStringVariable
  * and PerfStringConstant classes, but does not provide a counter type.
  *
  * Performance data are also described by a unit of measure. Units allow
@@ -172,10 +172,10 @@
  *   foo_counter->inc();
  *
  * Creating a performance counter that holds a variably change long
- * data value with untis specified in U_Bytes in the "com.sun.ci
+ * data value with units specified in U_Bytes in the "com.sun.ci
  * name space.
  *
- *   PerfLongVariable* bar_varible;
+ *   PerfLongVariable* bar_variable;
  *   bar_variable = PerfDataManager::create_long_variable(COM_CI, "bar",
 .*                                                        PerfData::U_Bytes,
  *                                                        optionalInitialValue,
@@ -203,7 +203,7 @@
  *    In this example, the PerfData pointer can be ignored as the caller
  *    is relying on the StatSampler PeriodicTask to sample the given
  *    address at a regular interval. The interval is defined by the
- *    PerfDataSamplingInterval global variable, and is applyied on
+ *    PerfDataSamplingInterval global variable, and is applied on
  *    a system wide basis, not on an per-counter basis.
  *
  * Creating a performance counter in an arbitrary name space that utilizes
@@ -234,7 +234,7 @@
  * the UsePerfData flag. Counters will be created on the c-heap
  * if UsePerfData is false.
  *
- * Until further noice, all PerfData objects should be created and
+ * Until further notice, all PerfData objects should be created and
  * manipulated within a guarded block. The guard variable is
  * UsePerfData, a product flag set to true by default. This flag may
  * be removed from the product in the future.
@@ -586,7 +586,7 @@
  *
  * The abstraction is not complete. A more general container class
  * would provide an Iterator abstraction that could be used to
- * traverse the lists. This implementation still relys upon integer
+ * traverse the lists. This implementation still relies upon integer
  * iterators and the at(int index) method. However, the GrowableArray
  * is not directly visible outside this class and can be replaced by
  * some other implementation, as long as that implementation provides
--- a/hotspot/src/share/vm/runtime/perfMemory.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/perfMemory.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -55,7 +55,7 @@
  * of the fields must be changed along with their counterparts in the
  * PerfDataBuffer Java class. The first four bytes of this structure
  * should never change, or compatibility problems between the monitoring
- * applications and Hotspot VMs will result. The reserved fields are
+ * applications and HotSpot VMs will result. The reserved fields are
  * available for future enhancements.
  */
 typedef struct {
--- a/hotspot/src/share/vm/runtime/reflection.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/reflection.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -482,7 +482,7 @@
     ik = InstanceKlass::cast(hc);
 
     // There's no way to make a host class loop short of patching memory.
-    // Therefore there cannot be a loop here unles there's another bug.
+    // Therefore there cannot be a loop here unless there's another bug.
     // Still, let's check for it.
     assert(--inf_loop_check > 0, "no host_klass loop");
   }
--- a/hotspot/src/share/vm/runtime/reflection.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/reflection.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -138,9 +138,9 @@
   static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS);
 
 public:
-  // Method invokation through java.lang.reflect.Method
+  // Method invocation through java.lang.reflect.Method
   static oop      invoke_method(oop method_mirror, Handle receiver, objArrayHandle args, TRAPS);
-  // Method invokation through java.lang.reflect.Constructor
+  // Method invocation through java.lang.reflect.Constructor
   static oop      invoke_constructor(oop method_mirror, objArrayHandle args, TRAPS);
 
 };
--- a/hotspot/src/share/vm/runtime/registerMap.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/registerMap.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -70,7 +70,7 @@
 //   3) The RegisterMap keeps track of the values of callee-saved registers
 //      from frame to frame (hence, the name).  For some stack traversal the
 //      values of the callee-saved registers does not matter, e.g., if you
-//      only need the static properies such as frame type, pc, and such.
+//      only need the static properties such as frame type, pc, and such.
 //      Updating of the RegisterMap can be turned off by instantiating the
 //      register map as: RegisterMap map(thread, false);
 
--- a/hotspot/src/share/vm/runtime/relocator.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/relocator.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -141,7 +141,7 @@
 }
 
 // size is the new size of the instruction at bci. Hence, if size is less than the current
-// instruction sice, we will shrink the code.
+// instruction size, we will shrink the code.
 methodHandle Relocator::insert_space_at(int bci, int size, u_char inst_buffer[], TRAPS) {
   _changes = new GrowableArray<ChangeItem*> (10);
   _changes->push(new ChangeWiden(bci, size, inst_buffer));
@@ -192,7 +192,7 @@
     // Execute operation
     if (!ci->handle_code_change(this)) return false;
 
-    // Shuffel items up
+    // Shuffle items up
     for (int index = 1; index < _changes->length(); index++) {
       _changes->at_put(index-1, _changes->at(index));
     }
@@ -214,7 +214,7 @@
 }
 
 // We need a special instruction size method, since lookupswitches and tableswitches might not be
-// properly alligned during relocation
+// properly aligned during relocation
 int Relocator::rc_instr_len(int bci) {
   Bytecodes::Code bc= code_at(bci);
   switch (bc) {
@@ -611,7 +611,7 @@
 
   // In case we have shrunken a tableswitch/lookupswitch statement, we store the last
   // bytes that get overwritten. We have to copy the bytes after the change_jumps method
-  // has been called, since it is likly to update last offset in a tableswitch/lookupswitch
+  // has been called, since it is likely to update last offset in a tableswitch/lookupswitch
   if (delta < 0) {
     assert(delta>=-3, "we cannot overwrite more than 3 bytes");
     memcpy(_overwrite, addr_at(bci + ilen + delta), -delta);
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -156,7 +156,7 @@
   // stopped by different mechanisms:
   //
   //  1. Running interpreted
-  //     The interpeter dispatch table is changed to force it to
+  //     The interpreter dispatch table is changed to force it to
   //     check for a safepoint condition between bytecodes.
   //  2. Running in native code
   //     When returning from the native code, a Java thread must check
@@ -282,7 +282,7 @@
       // See the comments in synchronizer.cpp for additional remarks on spinning.
       //
       // In the future we might:
-      // 1. Modify the safepoint scheme to avoid potentally unbounded spinning.
+      // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
       //    This is tricky as the path used by a thread exiting the JVM (say on
       //    on JNI call-out) simply stores into its state field.  The burden
       //    is placed on the VM thread, which must poll (spin).
@@ -489,7 +489,7 @@
     ConcurrentGCThread::safepoint_desynchronize();
   }
 #endif // INCLUDE_ALL_GCS
-  // record this time so VMThread can keep track how much time has elasped
+  // record this time so VMThread can keep track of how much time has elapsed
   // since last safepoint.
   _end_of_last_safepoint = os::javaTimeMillis();
 }
@@ -826,7 +826,7 @@
 void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason) {
   if (!timeout_error_printed) {
     timeout_error_printed = true;
-    // Print out the thread infor which didn't reach the safepoint for debugging
+    // Print out the thread info which didn't reach the safepoint for debugging
     // purposes (useful when there are lots of threads in the debugger).
     tty->print_cr("");
     tty->print_cr("# SafepointSynchronize::begin: Timeout detected:");
@@ -1093,7 +1093,7 @@
       if (caller_fr.is_deoptimized_frame()) {
         // The exception patch will destroy registers that are still
         // live and will be needed during deoptimization. Defer the
-        // Async exception should have defered the exception until the
+        // Async exception should have deferred the exception until the
         // next safepoint which will be detected when we get into
         // the interpreter so if we have an exception now things
         // are messed up.
--- a/hotspot/src/share/vm/runtime/safepoint.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/safepoint.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -59,7 +59,7 @@
  public:
   enum SynchronizeState {
       _not_synchronized = 0,                   // Threads not synchronized at a safepoint
-                                               // Keep this value 0. See the coment in do_call_back()
+                                               // Keep this value 0. See the comment in do_call_back()
       _synchronizing    = 1,                   // Synchronizing in progress
       _synchronized     = 2                    // All Java threads are stopped at a safepoint. Only VM thread is running
   };
@@ -91,7 +91,7 @@
   } SafepointStats;
 
  private:
-  static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquireing the Threads_lock
+  static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquiring the Threads_lock
   static volatile int _waiting_to_block;       // number of threads we are waiting for to block
   static int _current_jni_active_count;        // Counts the number of active critical natives during the safepoint
 
@@ -106,7 +106,7 @@
 private:
   static long       _end_of_last_safepoint;     // Time of last safepoint in milliseconds
 
-  // statistics
+  // Statistics
   static jlong            _safepoint_begin_time;     // time when safepoint begins
   static SafepointStats*  _safepoint_stats;          // array of SafepointStats struct
   static int              _cur_stat_index;           // current index to the above array
@@ -155,7 +155,7 @@
     _current_jni_active_count++;
   }
 
-  // Called when a thread volantary blocks
+  // Called when a thread voluntarily blocks
   static void   block(JavaThread *thread);
   static void   signal_thread_at_safepoint()              { _waiting_to_block--; }
 
@@ -172,7 +172,7 @@
   static bool is_cleanup_needed();
   static void do_cleanup_tasks();
 
-  // debugging
+  // Debugging
   static void print_state()                                PRODUCT_RETURN;
   static void safepoint_msg(const char* format, ...)       PRODUCT_RETURN;
 
@@ -183,7 +183,7 @@
   static void set_is_at_safepoint()                        { _state = _synchronized; }
   static void set_is_not_at_safepoint()                    { _state = _not_synchronized; }
 
-  // assembly support
+  // Assembly support
   static address address_of_state()                        { return (address)&_state; }
 
   static address safepoint_counter_addr()                  { return (address)&_safepoint_counter; }
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -472,7 +472,7 @@
   return (jdouble)x;
 JRT_END
 
-// Exception handling accross interpreter/compiler boundaries
+// Exception handling across interpreter/compiler boundaries
 //
 // exception_handler_for_return_address(...) returns the continuation address.
 // The continuation address is the entry point of the exception handler of the
@@ -694,8 +694,8 @@
     // Allow abbreviated catch tables.  The idea is to allow a method
     // to materialize its exceptions without committing to the exact
     // routing of exceptions.  In particular this is needed for adding
-    // a synthethic handler to unlock monitors when inlining
-    // synchonized methods since the unlock path isn't represented in
+    // a synthetic handler to unlock monitors when inlining
+    // synchronized methods since the unlock path isn't represented in
     // the bytecodes.
     t = table.entry_for(catch_pco, -1, 0);
   }
@@ -819,7 +819,7 @@
           // Exception happened in CodeCache. Must be either:
           // 1. Inline-cache check in C2I handler blob,
           // 2. Inline-cache check in nmethod, or
-          // 3. Implict null exception in nmethod
+          // 3. Implicit null exception in nmethod
 
           if (!cb->is_nmethod()) {
             bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
@@ -2850,7 +2850,7 @@
 // called from very start of a compiled OSR nmethod.  A temp array is
 // allocated to hold the interesting bits of the interpreter frame.  All
 // active locks are inflated to allow them to move.  The displaced headers and
-// active interpeter locals are copied into the temp buffer.  Then we return
+// active interpreter locals are copied into the temp buffer.  Then we return
 // back to the compiled code.  The compiled code then pops the current
 // interpreter frame off the stack and pushes a new compiled frame.  Then it
 // copies the interpreter locals and displaced headers where it wants.
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -382,7 +382,7 @@
   // present if we see that compiled code is present the compiled call site
   // will be patched/re-resolved so that later calls will run compiled.
 
-  // Aditionally a c2i blob need to have a unverified entry because it can be reached
+  // Additionally a c2i blob needs to have an unverified entry because it can be reached
   // in situations where the call site is an inlined cache site and may go megamorphic.
 
   // A i2c adapter is simpler than the c2i adapter. This is because it is assumed
@@ -576,7 +576,7 @@
 // arguments for a Java-compiled call, and jumps to Rmethod-> code()->
 // code_begin().  It is broken to call it without an nmethod assigned.
 // The usual behavior is to lift any register arguments up out of the
-// stack and possibly re-pack the extra arguments to be contigious.
+// stack and possibly re-pack the extra arguments to be contiguous.
 // I2C adapters will save what the interpreter's stack pointer will be
 // after arguments are popped, then adjust the interpreter's frame
 // size to force alignment and possibly to repack the arguments.
@@ -593,7 +593,7 @@
 // outgoing stack args will be dead after the copy.
 //
 // Native wrappers, like adapters, marshal arguments.  Unlike adapters they
-// also perform an offical frame push & pop.  They have a call to the native
+// also perform an official frame push & pop.  They have a call to the native
 // routine in their middles and end in a return (instead of ending in a jump).
 // The native wrappers are stored in real nmethods instead of the BufferBlobs
 // used by the adapters.  The code generation happens here because it's very
@@ -610,7 +610,7 @@
 
 #ifdef ASSERT
   // Captures code and signature used to generate this adapter when
-  // verifing adapter equivalence.
+  // verifying adapter equivalence.
   unsigned char* _saved_code;
   int            _saved_code_length;
 #endif
--- a/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/sharedRuntimeTrans.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -113,7 +113,7 @@
 }
 
 /* __ieee754_log(x)
- * Return the logrithm of x
+ * Return the logarithm of x
  *
  * Method :
  *   1. Argument Reduction: find k and f such that
--- a/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -223,7 +223,7 @@
  *
  *      fq[]    final product of x*(2/pi) in fq[0],..,fq[jk]
  *
- *      ih      integer. If >0 it indicats q[] is >= 0.5, hence
+ *      ih      integer. If >0 it indicates q[] is >= 0.5, hence
  *              it also indicates the *sign* of the result.
  *
  */
@@ -347,7 +347,7 @@
   if(z==0.0) {
     jz -= 1; q0 -= 24;
     while(iq[jz]==0) { jz--; q0-=24;}
-  } else { /* break z into 24-bit if neccessary */
+  } else { /* break z into 24-bit if necessary */
     z = scalbnA(z,-q0);
     if(z>=two24B) {
       fw = (double)((int)(twon24*z));
@@ -409,7 +409,7 @@
 
 /*
  * ====================================================
- * Copyright (c) 1993 Oracle and/or its affilates. All rights reserved.
+ * Copyright (c) 1993 Oracle and/or its affiliates. All rights reserved.
  *
  * Developed at SunPro, a Sun Microsystems, Inc. business.
  * Permission to use, copy, modify, and distribute this
--- a/hotspot/src/share/vm/runtime/signature.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/signature.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -152,7 +152,7 @@
   _parameter_index = 0;
 }
 
-// Optimized version of iterat_parameters when fingerprint is known
+// Optimized version of iterate_parameters when fingerprint is known
 void SignatureIterator::iterate_parameters( uint64_t fingerprint ) {
   uint64_t saved_fingerprint = fingerprint;
 
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -387,7 +387,7 @@
                                                      int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   // If the method is already compiling, quickly bail out.
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
-    // Use loop event as an opportinity to also check there's been
+    // Use loop event as an opportunity to also check there's been
     // enough calls.
     CompLevel cur_level = comp_level(mh());
     CompLevel next_level = call_event(mh(), cur_level);
--- a/hotspot/src/share/vm/runtime/statSampler.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/statSampler.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -222,8 +222,8 @@
  * The list of System Properties that have corresponding PerfData
  * string instrumentation created by retrieving the named property's
  * value from System.getProperty() and unconditionally creating a
- * PerfStringConstant object initialized to the retreived value. This
- * is not an exhustive list of Java properties with corresponding string
+ * PerfStringConstant object initialized to the retrieved value. This
+ * is not an exhaustive list of Java properties with corresponding string
  * instrumentation as the create_system_property_instrumentation() method
  * creates other property based instrumentation conditionally.
  */
@@ -325,7 +325,7 @@
   // create string instrumentation for various Java properties.
   create_system_property_instrumentation(CHECK);
 
-  // hotspot flags (from .hotspotrc) and args (from command line)
+  // HotSpot flags (from .hotspotrc) and args (from command line)
   //
   PerfDataManager::create_string_constant(JAVA_RT, "vmFlags",
                                           Arguments::jvm_flags(), CHECK);
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -111,7 +111,7 @@
 };
 
 
-// Stack-allocated helper class used to assciate a stub code with a name.
+// Stack-allocated helper class used to associate a stub code with a name.
 // All stub code generating functions that use a StubCodeMark will be registered
 // in the global StubCodeDesc list and the generated stub code can be identified
 // later via an address pointing into it.
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -737,10 +737,10 @@
 }
 
 // Be aware of this method could revoke bias of the lock object.
-// This method querys the ownership of the lock handle specified by 'h_obj'.
+// This method queries the ownership of the lock handle specified by 'h_obj'.
 // If the current thread owns the lock, it returns owner_self. If no
 // thread owns the lock, it returns owner_none. Otherwise, it will return
-// ower_other.
+// owner_other.
 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
 (JavaThread *self, Handle h_obj) {
   // The caller must beware this method can revoke bias, and
--- a/hotspot/src/share/vm/runtime/synchronizer.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/synchronizer.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -49,7 +49,7 @@
   // to use enter() and exit() in order to make sure user be ware
   // of the performance and semantics difference. They are normally
   // used by ObjectLocker etc. The interpreter and compiler use
-  // assembly copies of these routines. Please keep them synchornized.
+  // assembly copies of these routines. Please keep them synchronized.
   //
   // attempt_rebias flag is used by UseBiasedLocking implementation
   static void fast_enter  (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
--- a/hotspot/src/share/vm/runtime/thread.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -767,7 +767,7 @@
 void JavaThread::record_jump(address target, address instr, const char* file, int line) {
 
   // This should not need to be atomic as the only way for simultaneous
-  // updates is via interrupts. Even then this should be rare or non-existant
+  // updates is via interrupts. Even then this should be rare or non-existent
   // and we don't care that much anyway.
 
   int index = _jmp_ring_index;
@@ -925,10 +925,10 @@
         // Threads_lock is special, since the safepoint synchronization will not start before this is
         // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
         // since it is used to transfer control between JavaThreads and the VMThread
-        // Do not *exclude* any locks unless you are absolutly sure it is correct. Ask someone else first!
+        // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
         if ( (cur->allow_vm_block() &&
               cur != Threads_lock &&
-              cur != Compile_lock &&               // Temporary: should not be necessary when we get spearate compilation
+              cur != Compile_lock &&               // Temporary: should not be necessary when we get separate compilation
               cur != VMOperationRequest_lock &&
               cur != VMOperationQueue_lock) ||
               cur->rank() == Mutex::special) {
@@ -1271,7 +1271,7 @@
         time_slept = 0;
         time_before_loop = now;
     } else {
-        // need to recalulate since we might have new tasks in _tasks
+        // need to recalculate since we might have new tasks in _tasks
         time_slept = (int) ((now - time_before_loop) / 1000000);
     }
 
@@ -1638,7 +1638,7 @@
   // initialize thread-local alloc buffer related fields
   this->initialize_tlab();
 
-  // used to test validitity of stack trace backs
+  // used to test validity of stack trace backs
   this->record_base_of_stack_pointer();
 
   // Record real stack base and size.
@@ -3320,7 +3320,7 @@
   // Initialize system properties.
   Arguments::init_system_properties();
 
-  // So that JDK version can be used as a discrimintor when parsing arguments
+  // So that JDK version can be used as a discriminator when parsing arguments
   JDK_Version_init();
 
   // Update/Initialize System properties after JDK version number is known
@@ -3359,7 +3359,7 @@
   jint adjust_after_os_result = Arguments::adjust_after_os();
   if (adjust_after_os_result != JNI_OK) return adjust_after_os_result;
 
-  // intialize TLS
+  // initialize TLS
   ThreadLocalStorage::init();
 
   // Bootstrap native memory tracking, so it can start recording memory
@@ -4156,7 +4156,7 @@
 // but the garbage collector must provide a safe context for them to run.
 // In particular, these things should never be called when the Threads_lock
 // is held by some other thread. (Note: the Safepoint abstraction also
-// uses the Threads_lock to gurantee this property. It also makes sure that
+// uses the Threads_lock to guarantee this property. It also makes sure that
 // all threads gets blocked when exiting or starting).
 
 void Threads::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
--- a/hotspot/src/share/vm/runtime/thread.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -1231,7 +1231,7 @@
   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
 
-  // Side structure for defering update of java frame locals until deopt occurs
+  // Side structure for deferring update of java frame locals until deopt occurs
   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
   void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }
 
--- a/hotspot/src/share/vm/runtime/unhandledOops.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/unhandledOops.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -34,7 +34,7 @@
 // destructor.  The constructor adds the oop address on a list
 // off each thread and the destructor removes the oop.  At a potential
 // safepoint, the stack addresses of the local variable oops are trashed
-// with a recognizeable value.  If the local variable is used again, it
+// with a recognizable value.  If the local variable is used again, it
 // will segfault, indicating an unsafe use of that oop.
 // eg:
 //    oop o;    //register &o on list
--- a/hotspot/src/share/vm/runtime/vframeArray.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/vframeArray.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -53,7 +53,7 @@
 
     frame _frame;                                                // the interpreter frame we will unpack into
     int  _bci;                                                   // raw bci for this vframe
-    bool _reexecute;                                             // whether sould we reexecute this bytecode
+    bool _reexecute;                                             // whether we should reexecute this bytecode
     Method*    _method;                                          // the method for this vframe
     MonitorChunk* _monitors;                                     // active monitors for this vframe
     StackValueCollection* _locals;
@@ -158,7 +158,7 @@
   // Tells whether index is within bounds.
   bool is_within_bounds(int index) const        { return 0 <= index && index < frames(); }
 
-  // Accessores for instance variable
+  // Accessors for instance variable
   int frames() const                            { return _frames;   }
 
   static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Thu Jan 23 14:47:23 2014 +0100
@@ -551,10 +551,10 @@
 
   // Determine which regions need to grow in this expand_by call.
   // If you are growing in the lower region, high() must be in that
-  // region so calcuate the size based on high().  For the middle and
+  // region so calculate the size based on high().  For the middle and
   // upper regions, determine the starting point of growth based on the
   // location of high().  By getting the MAX of the region's low address
-  // (or the prevoius region's high address) and high(), we can tell if it
+  // (or the previous region's high address) and high(), we can tell if it
   // is an intra or inter region growth.
   size_t lower_needs = 0;
   if (aligned_lower_new_high > lower_high()) {
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -154,7 +154,7 @@
   void set_next(VM_Operation *next)              { _next = next; }
   void set_prev(VM_Operation *prev)              { _prev = prev; }
 
-  // Configuration. Override these appropriatly in subclasses.
+  // Configuration. Override these appropriately in subclasses.
   virtual VMOp_Type type() const = 0;
   virtual Mode evaluation_mode() const            { return _safepoint; }
   virtual bool allow_nested_vm_operations() const { return false; }
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Tue Jan 14 16:40:33 2014 +0100
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Jan 23 14:47:23 2014 +0100
@@ -149,7 +149,7 @@
 // The larger HeapWordSize for 64bit requires larger heaps
 // for the same application running in 64bit.  See bug 4967770.
 // The minimum alignment to a heap word size is done.  Other
-// parts of the memory system may required additional alignment
+// parts of the memory system may require additional alignment
 // and are responsible for those alignments.
 #ifdef _LP64
 #define ScaleForWordSize(x) align_size_down_((x) * 13 / 10, HeapWordSize)