--- a/src/hotspot/share/prims/jvm.cpp Mon Nov 20 12:04:13 2017 +0100
+++ b/src/hotspot/share/prims/jvm.cpp Mon Nov 20 13:07:44 2017 +0100
@@ -35,12 +35,12 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
-#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
+#include "oops/access.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/method.hpp"
@@ -652,24 +652,7 @@
new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL);
}

- // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
- // is modifying a reference field in the clonee, a non-oop-atomic copy might
- // be suspended in the middle of copying the pointer and end up with parts
- // of two different pointers in the field. Subsequent dereferences will crash.
- // 4846409: an oop-copy of objects with long or double fields or arrays of same
- // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
- // of oops. We know objects are aligned on a minimum of an jlong boundary.
- // The same is true of StubRoutines::object_copy and the various oop_copy
- // variants, and of the code generated by the inline_native_clone intrinsic.
- assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
- Copy::conjoint_jlongs_atomic((jlong*)obj(), (jlong*)new_obj_oop,
- align_object_size(size) / HeapWordsPerLong);
- // Clear the header
- new_obj_oop->init_mark();
-
- // Store check (mark entire object and let gc sort it out)
- BarrierSet* bs = Universe::heap()->barrier_set();
- bs->write_region(MemRegion((HeapWord*)new_obj_oop, size));
+ HeapAccess<>::clone(obj(), new_obj_oop, size);
Handle new_obj(THREAD, new_obj_oop);
// Caution: this involves a java upcall, so the clone should be
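
For reference, the single HeapAccess<>::clone call above stands in for all of the open-coded work removed in this hunk: the oop-atomic jlong copy, the mark-word re-initialization, and the whole-object write barrier. The helper below is a sketch only, not the actual Access API backend; it restates the removed logic under a hypothetical name (clone_via_barriers_sketch), using the same HotSpot calls that appear in the removed lines, to make explicit what the new call is expected to cover for a card-table style heap.

// Sketch only: NOT the real backend behind HeapAccess<>::clone.
// It mirrors the block removed above and assumes the includes jvm.cpp already has.
static void clone_via_barriers_sketch(oop src, oop dst, int size) {
  // Oop-atomic, jlong-wide copy so a reference field being updated concurrently
  // is never observed half-written (bug IDs 4839641/4840070/4846409 above).
  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst,
                               align_object_size(size) / HeapWordsPerLong);
  // The clone must not inherit the source's mark word (hash code, lock bits).
  dst->init_mark();
  // Post-barrier: dirty the entire object region and let the GC sort it out.
  BarrierSet* bs = Universe::heap()->barrier_set();
  bs->write_region(MemRegion((HeapWord*)dst, size));
}

Which barrier work actually runs is now decided by the barrier set behind the Access API rather than by this call site, which is why the first hunk drops barrierSet.inline.hpp in favor of oops/access.inline.hpp.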