#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------

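"""
Benchmark commands for the Graal mx suite.

Registers mx commands for running the DaCapo, Scala DaCapo, SPECjvm2008,
SPECjbb2005, SPECjbb2013 and SPECjbb2015 benchmarks, as well as the 'bench',
'deoptalot' and 'longtests' helpers. Typical invocations (benchmark
availability depends on the local sanitycheck configuration):

    mx dacapo fop
    mx bench -resultfile results.json dacapo
"""
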
import sanitycheck
import itertools
import json

import mx
import mx_graal

def _run_benchmark(args, availableBenchmarks, runBenchmark):
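    """Common driver for the benchmark commands below.

    'args' is split into VM options and the remaining benchmark/harness
    arguments. If 'availableBenchmarks' is None, the harness is invoked once
    with all remaining arguments; otherwise the leading non-option arguments
    name the benchmarks to run (or "all") and each must be listed in
    'availableBenchmarks'. 'runBenchmark(benchmark, harnessArgs, vmOpts)' is
    called for each selected benchmark; benchmarks for which it returns a
    falsy value are reported as failures.
    """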
    vmOpts, benchmarksAndOptions = mx.extract_VM_args(args, useDoubleDash=availableBenchmarks is None)

    if availableBenchmarks is None:
        harnessArgs = benchmarksAndOptions
        return runBenchmark(None, harnessArgs, vmOpts)

    if len(benchmarksAndOptions) == 0:
        mx.abort('at least one benchmark name or "all" must be specified')
    benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
    harnessArgs = benchmarksAndOptions[len(benchmarks):]

    if 'all' in benchmarks:
        benchmarks = availableBenchmarks
    else:
        for bm in benchmarks:
            if bm not in availableBenchmarks:
                mx.abort('unknown benchmark: ' + bm + '\nselect one of: ' + str(availableBenchmarks))

    failed = []
    for bm in benchmarks:
        if not runBenchmark(bm, harnessArgs, vmOpts):
            failed.append(bm)

    if len(failed) != 0:
        mx.abort('Benchmark failures: ' + str(failed))

def deoptalot(args):
    """bootstrap a VM with DeoptimizeALot and VerifyOops on

    If the first argument is a number, the process will be repeated
    this number of times. All other arguments are passed to the VM."""
    count = 1
    if len(args) > 0 and args[0].isdigit():
        count = int(args[0])
        del args[0]

    for _ in range(count):
        if mx_graal.run_vm(['-XX:-TieredCompilation', '-XX:+DeoptimizeALot', '-XX:+VerifyOops'] + args + ['-version']) != 0:
            mx.abort("Failed")

def longtests(args):
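    """run a longer set of sanity tests: repeated DeoptimizeALot bootstraps
    followed by the DaCapo 'eclipse' benchmark with assertions enabled"""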
    deoptalot(['15', '-Xmx48m'])

    dacapo(['100', 'eclipse', '-esa'])

def dacapo(args):
    """run one or more DaCapo benchmarks"""

    def launcher(bm, harnessArgs, extraVmOpts):
        return sanitycheck.getDacapo(bm, harnessArgs).test(mx_graal.get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, sanitycheck.dacapoSanityWarmup.keys(), launcher)

def scaladacapo(args):
    """run one or more Scala DaCapo benchmarks"""

    def launcher(bm, harnessArgs, extraVmOpts):
        return sanitycheck.getScalaDacapo(bm, harnessArgs).test(mx_graal.get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, sanitycheck.dacapoScalaSanityWarmup.keys(), launcher)


"""
Extra benchmarks to run from 'bench()'.
"""
extraBenchmarks = []

def bench(args):
    """run benchmarks and parse their output for results

    Results are JSON formatted: {group : {benchmark : score}}."""
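    # Strip the optional '-resultfile <file>' and '-resultfilecsv <file>'
    # options; the remaining arguments select benchmarks and VM options.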
    resultFile = None
    if '-resultfile' in args:
        index = args.index('-resultfile')
        if index + 1 < len(args):
            resultFile = args[index + 1]
            del args[index]
            del args[index]
        else:
            mx.abort('-resultfile must be followed by a file name')
    resultFileCSV = None
    if '-resultfilecsv' in args:
        index = args.index('-resultfilecsv')
        if index + 1 < len(args):
            resultFileCSV = args[index + 1]
            del args[index]
            del args[index]
        else:
            mx.abort('-resultfilecsv must be followed by a file name')
    vm = mx_graal.get_vm()
    if len(args) == 0:
        args = ['all']

    vmArgs = [arg for arg in args if arg.startswith('-')]

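    # Individual benchmarks can be selected with 'group:name' arguments,
    # e.g. 'dacapo:fop' (the name must be known to sanitycheck).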
    def benchmarks_in_group(group):
        prefix = group + ':'
        return [a[len(prefix):] for a in args if a.startswith(prefix)]

    results = {}
    benchmarks = []
    # DaCapo
    if 'dacapo' in args or 'all' in args:
        benchmarks += sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
    else:
        dacapos = benchmarks_in_group('dacapo')
        for dacapo in dacapos:
            if dacapo not in sanitycheck.dacapoSanityWarmup.keys():
                mx.abort('Unknown DaCapo: ' + dacapo)
            iterations = sanitycheck.dacapoSanityWarmup[dacapo][sanitycheck.SanityCheckLevel.Benchmark]
            if iterations > 0:
                benchmarks += [sanitycheck.getDacapo(dacapo, ['-n', str(iterations)])]

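    # Scala DaCapo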
    if 'scaladacapo' in args or 'all' in args:
        benchmarks += sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
    else:
        scaladacapos = benchmarks_in_group('scaladacapo')
        for scaladacapo in scaladacapos:
            if scaladacapo not in sanitycheck.dacapoScalaSanityWarmup.keys():
                mx.abort('Unknown Scala DaCapo: ' + scaladacapo)
            iterations = sanitycheck.dacapoScalaSanityWarmup[scaladacapo][sanitycheck.SanityCheckLevel.Benchmark]
            if iterations > 0:
                benchmarks += [sanitycheck.getScalaDacapo(scaladacapo, ['-n', str(iterations)])]

    # Bootstrap
    if 'bootstrap' in args or 'all' in args:
        benchmarks += sanitycheck.getBootstraps()
    # SPECjvm2008
    if 'specjvm2008' in args or 'all' in args:
        benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120'])]
    else:
        specjvms = benchmarks_in_group('specjvm2008')
        for specjvm in specjvms:
            benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120', specjvm])]

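    # SPECjbb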
    if 'specjbb2005' in args or 'all' in args:
        benchmarks += [sanitycheck.getSPECjbb2005()]

    if 'specjbb2013' in args:  # or 'all' in args -- currently not in default set
        benchmarks += [sanitycheck.getSPECjbb2013()]

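    # CompileTheWorld (CTW)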
    if 'ctw-full' in args:
        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.Full))
    if 'ctw-noinline' in args:
        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoInline))

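    # Let registered extensions contribute additional benchmarks (see 'extraBenchmarks').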
    for f in extraBenchmarks:
        f(args, vm, benchmarks)

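    # Run the selected benchmarks and merge their per-group results.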
    for test in benchmarks:
        for (groupName, res) in test.bench(vm, extraVmOpts=vmArgs).items():
            group = results.setdefault(groupName, {})
            group.update(res)
    mx.log(json.dumps(results))
    if resultFile:
        with open(resultFile, 'w') as f:
            f.write(json.dumps(results))
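    # CSV output: a '<group>;' line followed by one '<benchmark>; <score>;' line per result in that group.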
    if resultFileCSV:
        with open(resultFileCSV, 'w') as f:
            for key1, value1 in results.iteritems():
                f.write('%s;\n' % (str(key1)))
                for key2, value2 in sorted(value1.iteritems()):
                    f.write('%s; %s;\n' % (str(key2), str(value2)))

def specjvm2008(args):
    """run one or more SPECjvm2008 benchmarks"""

    def launcher(bm, harnessArgs, extraVmOpts):
        return sanitycheck.getSPECjvm2008(harnessArgs + [bm]).bench(mx_graal.get_vm(), extraVmOpts=extraVmOpts)

    availableBenchmarks = set(sanitycheck.specjvm2008Names)
    if "all" not in args:
        # only add benchmark groups if we are not running "all"
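        # A dotted name such as 'crypto.aes' (if present in
        # sanitycheck.specjvm2008Names) also makes its 'crypto' prefix
        # selectable as a group name.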
        for name in sanitycheck.specjvm2008Names:
            parts = name.rsplit('.', 1)
            if len(parts) > 1:
                assert len(parts) == 2
                group = parts[0]
                availableBenchmarks.add(group)

    _run_benchmark(args, sorted(availableBenchmarks), launcher)

def specjbb2013(args):
    """run the composite SPECjbb2013 benchmark"""

    def launcher(bm, harnessArgs, extraVmOpts):
        assert bm is None
        return sanitycheck.getSPECjbb2013(harnessArgs).bench(mx_graal.get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, None, launcher)

def specjbb2015(args):
    """run the composite SPECjbb2015 benchmark"""

    def launcher(bm, harnessArgs, extraVmOpts):
        assert bm is None
        return sanitycheck.getSPECjbb2015(harnessArgs).bench(mx_graal.get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, None, launcher)

def specjbb2005(args):
    """run the composite SPECjbb2005 benchmark"""

    def launcher(bm, harnessArgs, extraVmOpts):
        assert bm is None
        return sanitycheck.getSPECjbb2005(harnessArgs).bench(mx_graal.get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, None, launcher)

mx.update_commands(mx.suite('graal'), {
    'dacapo': [dacapo, '[VM options] benchmarks...|"all" [DaCapo options]'],
    'scaladacapo': [scaladacapo, '[VM options] benchmarks...|"all" [Scala DaCapo options]'],
    'specjvm2008': [specjvm2008, '[VM options] benchmarks...|"all" [SPECjvm2008 options]'],
    'specjbb2013': [specjbb2013, '[VM options] [-- [SPECjbb2013 options]]'],
    'specjbb2015': [specjbb2015, '[VM options] [-- [SPECjbb2015 options]]'],
    'specjbb2005': [specjbb2005, '[VM options] [-- [SPECjbb2005 options]]'],
    'bench': [bench, '[-resultfile file] [-resultfilecsv file] [all(default)|dacapo|scaladacapo|specjvm2008|specjbb2005|bootstrap]'],
    'deoptalot': [deoptalot, '[n]'],
    'longtests': [longtests, ''],
})