#
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.  Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

default: all

# SPEC carries the configuration; MakeBase/FindTests supply the shared
# framework macros (LogWarn, ExecuteWithLog, ParseKeywordVariable, ...).
include $(SPEC)
include MakeBase.gmk
include FindTests.gmk

# We will always run multiple tests serially
.NOTPARALLEL:

# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, , RunTests.gmk))

# All test output (reports and scratch/work files) goes below these two roots.
TEST_RESULTS_DIR := $(BUILD_OUTPUT)/test-results
TEST_SUPPORT_DIR := $(BUILD_OUTPUT)/test-support

################################################################################
# Parse control variables
################################################################################

# JTREG="JOBS=4;TIMEOUT=8;OPTIONS=..." style user input is split into
# JTREG_JOBS, JTREG_TIMEOUT etc. by ParseKeywordVariable (from MakeBase.gmk).
$(eval $(call ParseKeywordVariable, JTREG, \
    KEYWORDS := JOBS TIMEOUT TEST_MODE ASSERT VERBOSE RETAIN MAX_MEM, \
    STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS, \
))

ifneq ($(JTREG), )
  # Inform the user
  $(info Running tests using JTREG control variable '$(JTREG)')
endif

$(eval $(call ParseKeywordVariable, GTEST, \
    KEYWORDS := REPEAT, \
    STRING_KEYWORDS := OPTIONS, \
))

ifneq ($(GTEST), )
  # Inform the user
  $(info Running tests using GTEST control variable '$(GTEST)')
endif

################################################################################
# Component-specific Jtreg settings
################################################################################

ifeq ($(TEST_JOBS), 0)
  # If TEST_JOBS is not specified, hotspot fallback default is
  # min(num_cores / 2, 12).
  hotspot_JTREG_JOBS := $(shell $(EXPR) $(NUM_CORES) / 2)
  ifeq ($(hotspot_JTREG_JOBS), 0)
    hotspot_JTREG_JOBS := 1
  else ifeq ($(shell $(EXPR) $(hotspot_JTREG_JOBS) \> 12), 1)
    hotspot_JTREG_JOBS := 12
  endif
endif

# Hotspot has its own memory/assertion defaults; see SetJtregValue for how
# these component-prefixed variables override the generic defaults.
hotspot_JTREG_MAX_MEM := 0
hotspot_JTREG_ASSERT := false
hotspot_JTREG_NATIVEPATH := $(TEST_IMAGE_DIR)/hotspot/jtreg/native
jdk_JTREG_NATIVEPATH := $(TEST_IMAGE_DIR)/jdk/jtreg/native

################################################################################
# Parse test selection
#
# The user has given a test selection in the TEST variable. We must parse it
# and determine what that means in terms of actual calls to the test framework.
#
# The parse functions take as argument a test specification as given by the
# user, and returns a fully qualified test descriptor if it was a match, or
# nothing if not. A single test specification can result in multiple test
# descriptors being returned. A valid test descriptor must always be accepted
# and returned identically.
################################################################################

# Helper function to determine if a test specification is a Gtest test
#
# It is a Gtest test if it is either "gtest", or "gtest:" followed by an
# optional test filter string. Bare "gtest" and "gtest:" both normalize to
# the canonical descriptor "gtest:all"; anything else is returned verbatim.
define ParseGtestTestSelection
  $(if $(filter gtest%, $1), \
    $(if $(filter gtest, $1), \
      gtest:all \
    , \
      $(if $(filter gtest:, $1), \
        gtest:all \
      , \
        $1 \
      ) \
    ) \
  )
endef

# Helper function to determine if a test specification is a Jtreg test
#
# It is a Jtreg test if it optionally begins with jtreg:, and then is either
# an unspecified group name (possibly prefixed by :), or a group in a
# specified <component>/test directory, or a path to a test or test directory,
# either absolute or relative to TOPDIR.
define ParseJtregTestSelection
  $(eval TEST_NAME := $(strip $(patsubst jtreg:%, %, $1))) \
  $(if $(or $(findstring :, $(TEST_NAME)), $(findstring /, $(TEST_NAME))), , \
    $(eval TEST_NAME := :$(TEST_NAME)) \
  ) \
  $(if $(findstring :, $(TEST_NAME)), \
    $(if $(filter :%, $(TEST_NAME)), \
      $(foreach component, $(JTREG_COMPONENTS), \
        $(if $(filter $(patsubst :%, %, $(TEST_NAME)), \
            $($(component)_JTREG_TEST_GROUPS)), \
          jtreg:$(component)/test:$(patsubst :%,%,$(TEST_NAME)) \
        ) \
      ) \
    , \
      $(eval COMPONENT := $(word 1, $(subst /, $(SPACE), $(TEST_NAME)))) \
      $(eval GROUP := $(word 2, $(subst :, $(SPACE), $(TEST_NAME)))) \
      $(if $(filter $(COMPONENT), $(JTREG_COMPONENTS)), \
        $(if $(filter $(GROUP), $($(COMPONENT)_JTREG_TEST_GROUPS)), \
          jtreg:$(TEST_NAME) \
        ) \
      ) \
    ) \
  , \
    $(if $(filter /%, $(TEST_NAME)), \
      $(if $(wildcard $(TEST_NAME)), \
        jtreg:$(TEST_NAME) \
      ) \
    , \
      $(if $(wildcard $(TOPDIR)/$(TEST_NAME)), \
        jtreg:$(TEST_NAME) \
      ) \
    ) \
  )
endef

ifeq ($(TEST), )
  $(info No test selection given in TEST!)
  $(info Please use e.g. 'run-test TEST=tier1' or 'run-test-tier1')
  $(info See common/doc/testing.[md|html] for help)
  $(error Cannot continue)
endif

# Now intelligently convert the test selection given by the user in TEST
# into a list of fully qualified test descriptors of the tests to run.
# Parsers are tried in priority order: custom, then gtest, then jtreg.
TESTS_TO_RUN :=
$(foreach test, $(TEST), \
  $(eval PARSED_TESTS := $(call ParseCustomTestSelection, $(test))) \
  $(if $(strip $(PARSED_TESTS)), , \
    $(eval PARSED_TESTS += $(call ParseGtestTestSelection, $(test))) \
  ) \
  $(if $(strip $(PARSED_TESTS)), , \
    $(eval PARSED_TESTS += $(call ParseJtregTestSelection, $(test))) \
  ) \
  $(if $(strip $(PARSED_TESTS)), , \
    $(eval UNKNOWN_TEST := $(test)) \
  ) \
  $(eval TESTS_TO_RUN += $(PARSED_TESTS)) \
)

ifneq ($(UNKNOWN_TEST), )
  $(info Unknown test selection: '$(UNKNOWN_TEST)')
  $(info See common/doc/testing.[md|html] for help)
  $(error Cannot continue)
endif

TESTS_TO_RUN := $(strip $(TESTS_TO_RUN))


# Present the result of our parsing to the user
$(info Test selection '$(TEST)', will run:)
$(foreach test, $(TESTS_TO_RUN), $(info * $(test)))

################################################################################
# Functions for setting up rules for running the selected tests
#
# The SetupRun*Test functions all have the same interface:
#
# Parameter 1 is the name of the rule. This is the test id, based on the test
# descriptor, and this is also used as variable prefix, and the targets
# generated are listed in a variable by that name.
#
# Remaining parameters are named arguments. Currently this is only:
#   TEST -- The properly formatted fully qualified test descriptor
#
# After the rule named by the test id has been executed, the following
# variables will be available:
# testid_TOTAL - the total number of tests run
# testid_PASSED - the number of successful tests
# testid_FAILED - the number of failed tests
# testid_ERROR - the number of tests was neither successful or failed
#
################################################################################

### Rules for Gtest

SetupRunGtestTest = $(NamedParamsMacroTemplate)
define SetupRunGtestTestBody
  $1_TEST_RESULTS_DIR := $$(TEST_RESULTS_DIR)/$1
  $1_TEST_SUPPORT_DIR := $$(TEST_SUPPORT_DIR)/$1

  # "gtest:all" means no filter; anything else becomes a --gtest_filter prefix.
  $1_TEST_NAME := $$(strip $$(patsubst gtest:%, %, $$($1_TEST)))
  ifneq ($$($1_TEST_NAME), all)
    $1_GTEST_FILTER := --gtest_filter=$$($1_TEST_NAME)*
  endif

  ifneq ($$(GTEST_REPEAT), )
    $1_GTEST_REPEAT :=--gtest_repeat=$$(GTEST_REPEAT)
  endif

  # "|| true" keeps make going on test failure; results are parsed below.
  run-test-$1:
	$$(call LogWarn)
	$$(call LogWarn, Running test '$$($1_TEST)')
	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/gtest, \
	    $$(FIXPATH) $$(TEST_IMAGE_DIR)/hotspot/gtest/server/gtestLauncher \
	    -jdk $(JDK_IMAGE_DIR) $$($1_GTEST_FILTER) \
	    --gtest_output=xml:$$($1_TEST_RESULTS_DIR)/gtest.xml \
	    $$($1_GTEST_REPEAT) $$(GTEST_OPTIONS) \
	    > >($(TEE) $$($1_TEST_RESULTS_DIR)/gtest.txt) || true )

  $1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/gtest.txt

  # Scrape counts from the captured gtest console output. The bracketed
  # markers use gtest's literal "[  PASSED  ]"/"[  FAILED  ]" spacing.
  parse-test-$1: run-test-$1
	$$(call LogWarn, Finished running test '$$($1_TEST)')
	$$(call LogWarn, Test report is stored in $$(strip \
	    $$(subst $$(TOPDIR)/, , $$($1_TEST_RESULTS_DIR))))
	$$(eval $1_TOTAL := $$(shell $$(AWK) '/==========.* tests? from .* \
	    test cases? ran/ { print $$$$2 }' $$($1_RESULT_FILE)))
	$$(eval $1_PASSED := $$(shell $$(AWK) '/\[  PASSED  \] .* tests?./ \
	    { print $$$$4 }' $$($1_RESULT_FILE)))
	$$(eval $1_FAILED := $$(shell $$(AWK) '/\[  FAILED  \] .* tests?, \
	    listed below/ { print $$$$4 }' $$($1_RESULT_FILE)))
	$$(if $$($1_FAILED), , $$(eval $1_FAILED := 0))
	$$(eval $1_ERROR := $$(shell \
	    $$(EXPR) $$($1_TOTAL) - $$($1_PASSED) - $$($1_FAILED)))

  $1: run-test-$1 parse-test-$1

  TARGETS += $1
endef

################################################################################

### Rules for Jtreg

# Helper function for SetupRunJtregTest. Set a JTREG_* variable from, in order:
# 1) Specified by user on command line
# 2) Component-specific default
# 3) Generic default
#
# Note: No spaces are allowed around the arguments.
# Arg $1 The test ID (i.e. $1 in SetupRunJtregTest)
# Arg $2 Base variable, e.g. JTREG_JOBS
# Arg $3 The default value (optional)
define SetJtregValue
  ifneq ($$($2), )
    $1_$2 := $$($2)
  else
    ifneq ($$($$($1_COMPONENT)_$2), )
      $1_$2 := $$($$($1_COMPONENT)_$2)
    else
      ifneq ($3, )
        $1_$2 := $3
      endif
    endif
  endif
endef

SetupRunJtregTest = $(NamedParamsMacroTemplate)
define SetupRunJtregTestBody
  $1_TEST_RESULTS_DIR := $$(TEST_RESULTS_DIR)/$1
  $1_TEST_SUPPORT_DIR := $$(TEST_SUPPORT_DIR)/$1

  # The component (e.g. "hotspot" in "jtreg:hotspot/test:tier1") selects
  # component-specific defaults via SetJtregValue.
  $1_TEST_NAME := $$(strip $$(patsubst jtreg:%, %, $$($1_TEST)))
  $1_COMPONENT := $$(firstword $$(subst /, $$(SPACE), $$($1_TEST_NAME)))

  ifeq ($$(JT_HOME), )
    $$(info Error: jtreg framework is not found.)
    $$(info Please run configure using --with-jtreg.)
    $$(error Cannot continue)
  endif

  # Unfortunately, we need different defaults for some JTREG values,
  # depending on what component we're running.

  # Convert JTREG_foo into $1_JTREG_foo with a suitable value.
  $$(eval $$(call SetJtregValue,$1,JTREG_TEST_MODE,agentvm))
  $$(eval $$(call SetJtregValue,$1,JTREG_ASSERT,true))
  $$(eval $$(call SetJtregValue,$1,JTREG_MAX_MEM,512m))
  $$(eval $$(call SetJtregValue,$1,JTREG_NATIVEPATH))
  $$(eval $$(call SetJtregValue,$1,JTREG_BASIC_OPTIONS))

  ifneq ($(TEST_JOBS), 0)
    # User has specified TEST_JOBS, use that as fallback default
    $$(eval $$(call SetJtregValue,$1,JTREG_JOBS,$$(TEST_JOBS)))
  else
    # Use JOBS as default (except for hotspot)
    $$(eval $$(call SetJtregValue,$1,JTREG_JOBS,$$(JOBS)))
  endif

  ifeq ($$(shell $$(EXPR) $$($1_JTREG_JOBS) \> 50), 1)
    # Until CODETOOLS-7901892 is fixed, JTreg cannot handle more than 50 jobs
    $1_JTREG_JOBS := 50
  endif

  # Make sure MaxRAMFraction is high enough to not cause OOM or swapping since
  # we may end up with a lot of JVM's
  $1_JTREG_MAX_RAM_FRACTION := $$(shell $$(EXPR) $$($1_JTREG_JOBS) \* 4)

  JTREG_TIMEOUT ?= 4
  JTREG_VERBOSE ?= fail,error,summary
  JTREG_RETAIN ?= fail,error

  ifneq ($$($1_JTREG_MAX_MEM), 0)
    $1_JTREG_BASIC_OPTIONS += -vmoption:-Xmx$$($1_JTREG_MAX_MEM)
    $1_JTREG_LAUNCHER_OPTIONS += -Xmx$$($1_JTREG_MAX_MEM)
  endif

  $1_JTREG_BASIC_OPTIONS += -$$($1_JTREG_TEST_MODE) \
      -verbose:$$(JTREG_VERBOSE) -retain:$$(JTREG_RETAIN) \
      -concurrency:$$($1_JTREG_JOBS) -timeoutFactor:$$(JTREG_TIMEOUT) \
      -vmoption:-XX:MaxRAMFraction=$$($1_JTREG_MAX_RAM_FRACTION)

  $1_JTREG_BASIC_OPTIONS += -automatic -keywords:\!ignore -ignore:quiet

  # Some tests needs to find a boot JDK using the JDK8_HOME variable.
  $1_JTREG_BASIC_OPTIONS += -e:JDK8_HOME=$$(BOOT_JDK)

  $1_JTREG_BASIC_OPTIONS += \
      $$(addprefix -javaoption:, $$(JTREG_JAVA_OPTIONS)) \
      $$(addprefix -vmoption:, $$(JTREG_VM_OPTIONS)) \
      #

  ifeq ($$($1_JTREG_ASSERT), true)
    $1_JTREG_BASIC_OPTIONS += -ea -esa
  endif

  ifneq ($$($1_JTREG_NATIVEPATH), )
    $1_JTREG_BASIC_OPTIONS += -nativepath:$$($1_JTREG_NATIVEPATH)
  endif

  # "|| true" keeps make going on test failure; results are parsed below.
  run-test-$1:
	$$(call LogWarn)
	$$(call LogWarn, Running test '$$($1_TEST)')
	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/jtreg, \
	    $$(JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
	    -Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
	    $$($1_JTREG_BASIC_OPTIONS) \
	    -testjdk:$$(JDK_IMAGE_DIR) \
	    -dir:$$(TOPDIR) \
	    -reportDir:$$($1_TEST_RESULTS_DIR) \
	    -workDir:$$($1_TEST_SUPPORT_DIR) \
	    $$(JTREG_OPTIONS) \
	    $$($1_TEST_NAME) || true )

  $1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/text/stats.txt

  # Scrape "passed:"/"failed:"/"error:" counts from jtreg's stats.txt,
  # defaulting missing categories to 0 so EXPR arithmetic always works.
  parse-test-$1: run-test-$1
	$$(call LogWarn, Finished running test '$$($1_TEST)')
	$$(call LogWarn, Test report is stored in $$(strip \
	    $$(subst $$(TOPDIR)/, , $$($1_TEST_RESULTS_DIR))))
	$$(eval $1_PASSED := $$(shell $$(AWK) '{ gsub(/[,;]/, ""); \
	    for (i=1; i<=NF; i++) { if ($$$$i == "passed:") \
	    print $$$$(i+1) } }' $$($1_RESULT_FILE)))
	$$(if $$($1_PASSED), , $$(eval $1_PASSED := 0))
	$$(eval $1_FAILED := $$(shell $$(AWK) '{gsub(/[,;]/, ""); \
	    for (i=1; i<=NF; i++) { if ($$$$i == "failed:") \
	    print $$$$(i+1) } }' $$($1_RESULT_FILE)))
	$$(if $$($1_FAILED), , $$(eval $1_FAILED := 0))
	$$(eval $1_ERROR := $$(shell $$(AWK) '{gsub(/[,;]/, ""); \
	    for (i=1; i<=NF; i++) { if ($$$$i == "error:") \
	    print $$$$(i+1) } }' $$($1_RESULT_FILE)))
	$$(if $$($1_ERROR), , $$(eval $1_ERROR := 0))
	$$(eval $1_TOTAL := $$(shell \
	    $$(EXPR) $$($1_PASSED) + $$($1_FAILED) + $$($1_ERROR)))

  $1: run-test-$1 parse-test-$1

  TARGETS += $1
endef

################################################################################
# Setup and execute make rules for all selected tests
################################################################################

# Helper function to determine which handler to use for the given test
UseGtestTestHandler = \
  $(if $(filter gtest:%, $1), true)

UseJtregTestHandler = \
  $(if $(filter jtreg:%, $1), true)

# Now process each test to run and setup a proper make rule.
# The test id is the descriptor with all non-alphanumeric characters
# squeezed to "_", so it is usable as a make target and variable prefix.
$(foreach test, $(TESTS_TO_RUN), \
  $(eval TEST_ID := $(shell $(ECHO) $(strip $(test)) | \
      $(TR) -cs '[a-z][A-Z][0-9]\n' '_')) \
  $(eval ALL_TEST_IDS += $(TEST_ID)) \
  $(if $(call UseCustomTestHandler, $(test)), \
    $(eval $(call SetupRunCustomTest, $(TEST_ID), \
        TEST := $(test), \
    )) \
  ) \
  $(if $(call UseGtestTestHandler, $(test)), \
    $(eval $(call SetupRunGtestTest, $(TEST_ID), \
        TEST := $(test), \
    )) \
  ) \
  $(if $(call UseJtregTestHandler, $(test)), \
    $(eval $(call SetupRunJtregTest, $(TEST_ID), \
        TEST := $(test), \
    )) \
  ) \
)

# Sort also removes duplicates, so if there is any we'll get fewer words.
ifneq ($(words $(ALL_TEST_IDS)), $(words $(sort $(ALL_TEST_IDS))))
  $(error Duplicate test specification)
endif

################################################################################
# The main target for RunTests.gmk
################################################################################

# The SetupRun*Test functions have populated TARGETS.

TEST_FAILURE := false

# Print a table of the result of all tests run and their result. A test is
# considered successful when PASSED == TOTAL; any other outcome marks the
# whole run as failed via the exit-with-error sentinel file.
run-test: $(TARGETS)
	$(ECHO)
	$(ECHO) ==============================
	$(ECHO) Test summary
	$(ECHO) ==============================
	$(PRINTF) "%2s %-49s %5s %5s %5s %5s %2s\n" " " TEST \
	    TOTAL PASS FAIL ERROR " "
	$(foreach test, $(TESTS_TO_RUN), \
	  $(eval TEST_ID := $(shell $(ECHO) $(strip $(test)) | \
	      $(TR) -cs '[a-z][A-Z][0-9]\n' '_')) \
	  $(if $(filter $($(TEST_ID)_PASSED), $($(TEST_ID)_TOTAL)), \
	    $(PRINTF) "%2s %-49s %5d %5d %5d %5d %2s\n" " " "$(test)" \
	        $($(TEST_ID)_TOTAL) $($(TEST_ID)_PASSED) $($(TEST_ID)_FAILED) \
	        $($(TEST_ID)_ERROR) " " $(NEWLINE) \
	  , \
	    $(PRINTF) "%2s %-49s %5d %5d %5d %5d %2s\n" ">>" "$(test)" \
	        $($(TEST_ID)_TOTAL) $($(TEST_ID)_PASSED) $($(TEST_ID)_FAILED) \
	        $($(TEST_ID)_ERROR) "<<" $(NEWLINE) \
	    $(eval TEST_FAILURE := true) \
	  ) \
	)
	$(ECHO) ==============================
	$(if $(filter true, $(TEST_FAILURE)), \
	  $(ECHO) TEST FAILURE $(NEWLINE) \
	  $(TOUCH) $(MAKESUPPORT_OUTPUTDIR)/exit-with-error \
	, \
	  $(ECHO) TEST SUCCESS \
	)
	$(ECHO)

################################################################################

all: run-test

.PHONY: default all run-test $(TARGETS)