/*
 * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
29 |
29 |
30 |
30 |
31 // Copy bytes; larger units are filled atomically if everything is aligned. |
31 // Copy bytes; larger units are filled atomically if everything is aligned. |
32 void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) { |
32 void Copy::conjoint_memory_atomic(const void* from, void* to, size_t size) { |
33 address src = (address) from; |
33 uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size; |
34 address dst = (address) to; |
|
35 uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size; |
|
36 |
34 |
37 // (Note: We could improve performance by ignoring the low bits of size, |
35 // (Note: We could improve performance by ignoring the low bits of size, |
38 // and putting a short cleanup loop after each bulk copy loop. |
36 // and putting a short cleanup loop after each bulk copy loop. |
39 // There are plenty of other ways to make this faster also, |
37 // There are plenty of other ways to make this faster also, |
40 // and it's a slippery slope. For now, let's keep this code simple |
38 // and it's a slippery slope. For now, let's keep this code simple |
41 // since the simplicity helps clarify the atomicity semantics of |
39 // since the simplicity helps clarify the atomicity semantics of |
42 // this operation. There are also CPU-specific assembly versions |
40 // this operation. There are also CPU-specific assembly versions |
43 // which may or may not want to include such optimizations.) |
41 // which may or may not want to include such optimizations.) |
44 |
42 |
45 if (bits % sizeof(jlong) == 0) { |
43 if (bits % sizeof(jlong) == 0) { |
46 Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong)); |
44 Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong)); |
47 } else if (bits % sizeof(jint) == 0) { |
45 } else if (bits % sizeof(jint) == 0) { |
48 Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint)); |
46 Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint)); |
49 } else if (bits % sizeof(jshort) == 0) { |
47 } else if (bits % sizeof(jshort) == 0) { |
50 Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort)); |
48 Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort)); |
51 } else { |
49 } else { |
52 // Not aligned, so no need to be atomic. |
50 // Not aligned, so no need to be atomic. |
53 Copy::conjoint_jbytes((void*) src, (void*) dst, size); |
51 Copy::conjoint_jbytes((const void*) from, (void*) to, size); |
54 } |
52 } |
55 } |
53 } |
56 |
54 |
57 class CopySwap : AllStatic { |
55 class CopySwap : AllStatic { |
58 public: |
56 public: |