#else
JVMState* dbg_jvms() const { return NULL; }
#endif
};

class GraphKit;

1088 class ArrayCopyNode : public CallNode { |
|
1089 private: |
|
1090 |
|
1091 // What kind of arraycopy variant is this? |
|
1092 enum { |
|
1093 None, // not set yet |
|
1094 ArrayCopy, // System.arraycopy() |
|
1095 CloneBasic, // A clone that can be copied by 64 bit chunks |
|
1096 CloneOop, // An oop array clone |
|
1097 CopyOf, // Arrays.copyOf() |
|
1098 CopyOfRange // Arrays.copyOfRange() |
|
1099 } _kind; |
|
1100 |
|
1101 #ifndef PRODUCT |
|
1102 static const char* _kind_names[CopyOfRange+1]; |
|
1103 #endif |
|
1104 // Is the alloc obtained with |
|
1105 // AllocateArrayNode::Ideal_array_allocation() tighly coupled |
|
1106 // (arraycopy follows immediately the allocation)? |
|
1107 // We cache the result of LibraryCallKit::tightly_coupled_allocation |
|
1108 // here because it's much easier to find whether there's a tightly |
|
1109 // couple allocation at parse time than at macro expansion time. At |
|
1110 // macro expansion time, for every use of the allocation node we |
|
1111 // would need to figure out whether it happens after the arraycopy (and |
|
1112 // can be ignored) or between the allocation and the arraycopy. At |
|
1113 // parse time, it's straightforward because whatever happens after |
|
1114 // the arraycopy is not parsed yet so doesn't exist when |
|
1115 // LibraryCallKit::tightly_coupled_allocation() is called. |
|
1116 bool _alloc_tightly_coupled; |
|
1117 |
|
1118 bool _arguments_validated; |
|
1119 |
|
1120 static const TypeFunc* arraycopy_type() { |
|
1121 const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms); |
|
1122 fields[Src] = TypeInstPtr::BOTTOM; |
|
1123 fields[SrcPos] = TypeInt::INT; |
|
1124 fields[Dest] = TypeInstPtr::BOTTOM; |
|
1125 fields[DestPos] = TypeInt::INT; |
|
1126 fields[Length] = TypeInt::INT; |
|
1127 fields[SrcLen] = TypeInt::INT; |
|
1128 fields[DestLen] = TypeInt::INT; |
|
1129 fields[SrcKlass] = TypeKlassPtr::BOTTOM; |
|
1130 fields[DestKlass] = TypeKlassPtr::BOTTOM; |
|
1131 const TypeTuple *domain = TypeTuple::make(ParmLimit, fields); |
|
1132 |
|
1133 // create result type (range) |
|
1134 fields = TypeTuple::fields(0); |
|
1135 |
|
1136 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); |
|
1137 |
|
1138 return TypeFunc::make(domain, range); |
|
1139 } |
|
1140 |
|
1141 ArrayCopyNode(Compile* C, bool alloc_tightly_coupled); |
|
1142 |
|
1143 int get_count(PhaseGVN *phase) const; |
|
1144 static const TypePtr* get_address_type(PhaseGVN *phase, Node* n); |
|
1145 |
|
1146 Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count); |
|
1147 bool finish_transform(PhaseGVN *phase, bool can_reshape, |
|
1148 Node* ctl, Node *mem); |
|
1149 |
|
1150 public: |
|
1151 |
|
1152 enum { |
|
1153 Src = TypeFunc::Parms, |
|
1154 SrcPos, |
|
1155 Dest, |
|
1156 DestPos, |
|
1157 Length, |
|
1158 SrcLen, |
|
1159 DestLen, |
|
1160 SrcKlass, |
|
1161 DestKlass, |
|
1162 ParmLimit |
|
1163 }; |
|
1164 |
|
1165 static ArrayCopyNode* make(GraphKit* kit, bool may_throw, |
|
1166 Node* src, Node* src_offset, |
|
1167 Node* dest, Node* dest_offset, |
|
1168 Node* length, |
|
1169 bool alloc_tightly_coupled, |
|
1170 Node* src_klass = NULL, Node* dest_klass = NULL, |
|
1171 Node* src_length = NULL, Node* dest_length = NULL); |
|
1172 |
|
1173 void connect_outputs(GraphKit* kit); |
|
1174 |
|
1175 bool is_arraycopy() const { assert(_kind != None, "should bet set"); return _kind == ArrayCopy; } |
|
1176 bool is_arraycopy_validated() const { assert(_kind != None, "should bet set"); return _kind == ArrayCopy && _arguments_validated; } |
|
1177 bool is_clonebasic() const { assert(_kind != None, "should bet set"); return _kind == CloneBasic; } |
|
1178 bool is_cloneoop() const { assert(_kind != None, "should bet set"); return _kind == CloneOop; } |
|
1179 bool is_copyof() const { assert(_kind != None, "should bet set"); return _kind == CopyOf; } |
|
1180 bool is_copyofrange() const { assert(_kind != None, "should bet set"); return _kind == CopyOfRange; } |
|
1181 |
|
1182 void set_arraycopy(bool validated) { assert(_kind == None, "shouldn't bet set yet"); _kind = ArrayCopy; _arguments_validated = validated; } |
|
1183 void set_clonebasic() { assert(_kind == None, "shouldn't bet set yet"); _kind = CloneBasic; } |
|
1184 void set_cloneoop() { assert(_kind == None, "shouldn't bet set yet"); _kind = CloneOop; } |
|
1185 void set_copyof() { assert(_kind == None, "shouldn't bet set yet"); _kind = CopyOf; _arguments_validated = false; } |
|
1186 void set_copyofrange() { assert(_kind == None, "shouldn't bet set yet"); _kind = CopyOfRange; _arguments_validated = false; } |
|
1187 |
|
1188 virtual int Opcode() const; |
|
1189 virtual uint size_of() const; // Size is bigger |
|
1190 virtual bool guaranteed_safepoint() { return false; } |
|
1191 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
|
1192 |
|
1193 bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; } |
|
1194 |
|
1195 #ifndef PRODUCT |
|
1196 virtual void dump_spec(outputStream *st) const; |
|
1197 #endif |
|
1198 }; |
|
#endif // SHARE_VM_OPTO_CALLNODE_HPP