/*
 * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIR.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"

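// Static LIR operand handles for the ARM registers used by C1; they are
// defined here and filled in by FrameMap::initialize() below.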
LIR_Opr FrameMap::R0_opr;
LIR_Opr FrameMap::R1_opr;
LIR_Opr FrameMap::R2_opr;
LIR_Opr FrameMap::R3_opr;
LIR_Opr FrameMap::R4_opr;
LIR_Opr FrameMap::R5_opr;

LIR_Opr FrameMap::R0_oop_opr;
LIR_Opr FrameMap::R1_oop_opr;
LIR_Opr FrameMap::R2_oop_opr;
LIR_Opr FrameMap::R3_oop_opr;
LIR_Opr FrameMap::R4_oop_opr;
LIR_Opr FrameMap::R5_oop_opr;

LIR_Opr FrameMap::R0_metadata_opr;
LIR_Opr FrameMap::R1_metadata_opr;
LIR_Opr FrameMap::R2_metadata_opr;
LIR_Opr FrameMap::R3_metadata_opr;
LIR_Opr FrameMap::R4_metadata_opr;
LIR_Opr FrameMap::R5_metadata_opr;

#ifdef AARCH64
LIR_Opr FrameMap::ZR_opr;
#endif // AARCH64

LIR_Opr FrameMap::LR_opr;
LIR_Opr FrameMap::LR_oop_opr;
LIR_Opr FrameMap::LR_ptr_opr;
LIR_Opr FrameMap::FP_opr;
LIR_Opr FrameMap::SP_opr;
LIR_Opr FrameMap::Rthread_opr;

LIR_Opr FrameMap::Int_result_opr;
LIR_Opr FrameMap::Long_result_opr;
LIR_Opr FrameMap::Object_result_opr;
LIR_Opr FrameMap::Float_result_opr;
LIR_Opr FrameMap::Double_result_opr;

LIR_Opr FrameMap::Exception_oop_opr;
LIR_Opr FrameMap::Exception_pc_opr;

LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0 };
LIR_Opr FrameMap::_caller_save_fpu_regs[]; // same as initializing to zero

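// Translate a calling-convention location (a VMRegPair) into a C1 LIR operand:
// stack slots become SP-relative LIR addresses, integer registers become
// cpu/oop/metadata/long operands, and VFP registers become float or double
// operands. The unnamed bool parameter is ignored on this platform.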
LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
  LIR_Opr opr = LIR_OprFact::illegalOpr;
  VMReg r_1 = reg->first();
  VMReg r_2 = reg->second();
  if (r_1->is_stack()) {
    int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
    opr = LIR_OprFact::address(new LIR_Address(SP_opr, st_off, type));
  } else if (r_1->is_Register()) {
    Register reg = r_1->as_Register();
    if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
#ifdef AARCH64
      assert(r_1->next() == r_2, "should be the same");
      opr = as_long_opr(reg);
#else
      opr = as_long_opr(reg, r_2->as_Register());
#endif
    } else if (type == T_OBJECT || type == T_ARRAY) {
      opr = as_oop_opr(reg);
    } else if (type == T_METADATA) {
      opr = as_metadata_opr(reg);
    } else {
      // PreferInterpreterNativeStubs should ensure we never need to
      // handle a long opr passed as R3+stack_slot
      assert(! r_2->is_stack(), "missing support for ALIGN_WIDE_ARGUMENTS==0");
      opr = as_opr(reg);
    }
  } else if (r_1->is_FloatRegister()) {
    FloatRegister reg = r_1->as_FloatRegister();
    opr = type == T_FLOAT ? as_float_opr(reg) : as_double_opr(reg);
  } else {
    ShouldNotReachHere();
  }
  return opr;
}

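// One-time setup of the register numbering used by C1 and of the static
// operands defined above. Allocatable registers are mapped first, followed
// by registers the frame map knows about but does not allocate
// (LR, Rtemp, Rthread, FP, SP and ZR/PC).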
void FrameMap::initialize() {
  if (_init_done) return;

  int i;
  int rnum = 0;

  // Registers used for allocation
#ifdef AARCH64
  assert(Rthread == R28 && Rheap_base == R27 && Rtemp == R16, "change the code here");
  for (i = 0; i < 16; i++) {
    map_register(rnum++, as_Register(i));
  }
  for (i = 17; i < 28; i++) {
    map_register(rnum++, as_Register(i));
  }
#else
  assert(Rthread == R10 && Rtemp == R12, "change the code here");
  for (i = 0; i < 10; i++) {
    map_register(rnum++, as_Register(i));
  }
#endif // AARCH64
  assert(rnum == pd_nof_cpu_regs_reg_alloc, "should be");

  // Registers not used for allocation
  map_register(rnum++, LR); // LR register should be listed first, see c1_LinearScan_arm.hpp::is_processed_reg_num.
  assert(rnum == pd_nof_cpu_regs_processed_in_linearscan, "should be");

  map_register(rnum++, Rtemp);
  map_register(rnum++, Rthread);
  map_register(rnum++, FP); // ARM32: R7 or R11
  map_register(rnum++, SP);
#ifdef AARCH64
  map_register(rnum++, ZR);
#else
  map_register(rnum++, PC);
#endif
  assert(rnum == pd_nof_cpu_regs_frame_map, "should be");

  _init_done = true;

  R0_opr = as_opr(R0); R0_oop_opr = as_oop_opr(R0); R0_metadata_opr = as_metadata_opr(R0);
  R1_opr = as_opr(R1); R1_oop_opr = as_oop_opr(R1); R1_metadata_opr = as_metadata_opr(R1);
  R2_opr = as_opr(R2); R2_oop_opr = as_oop_opr(R2); R2_metadata_opr = as_metadata_opr(R2);
  R3_opr = as_opr(R3); R3_oop_opr = as_oop_opr(R3); R3_metadata_opr = as_metadata_opr(R3);
  R4_opr = as_opr(R4); R4_oop_opr = as_oop_opr(R4); R4_metadata_opr = as_metadata_opr(R4);
  R5_opr = as_opr(R5); R5_oop_opr = as_oop_opr(R5); R5_metadata_opr = as_metadata_opr(R5);

#ifdef AARCH64
  ZR_opr = as_opr(ZR);
#endif // AARCH64

  LR_opr = as_opr(LR);
  LR_oop_opr = as_oop_opr(LR);
  LR_ptr_opr = as_pointer_opr(LR);
  FP_opr = as_pointer_opr(FP);
  SP_opr = as_pointer_opr(SP);
  Rthread_opr = as_pointer_opr(Rthread);

  // LIR operands for result
  Int_result_opr = R0_opr;
  Object_result_opr = R0_oop_opr;
#ifdef AARCH64
  Long_result_opr = as_long_opr(R0);
  Float_result_opr = as_float_opr(S0);
  Double_result_opr = as_double_opr(D0);
#else
  Long_result_opr = as_long_opr(R0, R1);
#ifdef __ABI_HARD__
  Float_result_opr = as_float_opr(S0);
  Double_result_opr = as_double_opr(D0);
#else
  Float_result_opr = LIR_OprFact::single_softfp(0);
  Double_result_opr = LIR_OprFact::double_softfp(0, 1);
#endif // __ABI_HARD__
#endif // AARCH64

  Exception_oop_opr = as_oop_opr(Rexception_obj);
  Exception_pc_opr = as_opr(Rexception_pc);

  for (i = 0; i < nof_caller_save_cpu_regs(); i++) {
    _caller_save_cpu_regs[i] = LIR_OprFact::single_cpu(i);
  }
  for (i = 0; i < nof_caller_save_fpu_regs; i++) {
    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
  }
}

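// Stack slots handed out by the frame map are addressed relative to SP.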
Address FrameMap::make_new_address(ByteSize sp_offset) const {
  return Address(SP, sp_offset);
}

LIR_Opr FrameMap::stack_pointer() {
  return FrameMap::SP_opr;
}

LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() {
  assert(Rmh_SP_save == FP, "Fix register used for saving SP for MethodHandle calls");
  return FP_opr;
}

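// Verify that all SP-relative offsets in this frame (frame size plus incoming
// stack arguments) fit within the immediate offset bounds checked below:
// 4096 bytes on ARM32, 16384 bytes on AArch64.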
bool FrameMap::validate_frame() {
  int max_offset = in_bytes(framesize_in_bytes());
  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_stack()) {
      int arg_offset = _argument_locations->at(java_index);
      if (arg_offset > max_offset) {
        max_offset = arg_offset;
      }
    }
    java_index += type2size[opr->type()];
  }
  // TODO-AARCH64: check that LIRAssembler does not generate load/store of byte and half-word with SP as address base
  return max_offset < AARCH64_ONLY(16384) NOT_AARCH64(4096);
}

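// Map an FPU register number to its VMReg name.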
VMReg FrameMap::fpu_regname(int n) {
  return as_FloatRegister(n)->as_VMReg();
}