#
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
|
25 |
|
default: all

include $(SPEC)
include MakeBase.gmk
include FindTests.gmk

# We will always run multiple tests serially
.NOTPARALLEL:

# Hook to include the corresponding custom file, if present.
$(eval $(call IncludeCustomExtension, , RunTests.gmk))

# All test reports and test support (scratch) files end up under these
# two directories in the build output.
TEST_RESULTS_DIR := $(BUILD_OUTPUT)/test-results
TEST_SUPPORT_DIR := $(BUILD_OUTPUT)/test-support
|
41 |
|
################################################################################
# Parse control variables
################################################################################

# Split the JTREG control variable into individual JTREG_<keyword> variables.
$(eval $(call ParseKeywordVariable, JTREG, \
    KEYWORDS := JOBS TIMEOUT TEST_MODE ASSERT VERBOSE RETAIN MAX_MEM, \
    STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS, \
))

ifneq ($(JTREG), )
  # Inform the user
  $(info Running tests using JTREG control variable '$(JTREG)')
endif

# Likewise for the GTEST control variable.
$(eval $(call ParseKeywordVariable, GTEST, \
    KEYWORDS := REPEAT, \
    STRING_KEYWORDS := OPTIONS, \
))

ifneq ($(GTEST), )
  # Inform the user
  $(info Running tests using GTEST control variable '$(GTEST)')
endif
|
65 |
|
66 |
|
################################################################################
# Component-specific Jtreg settings
################################################################################

ifeq ($(TEST_JOBS), 0)
  # If TEST_JOBS is not specified, hotspot fallback default is
  # min(num_cores / 2, 12).
  hotspot_JTREG_JOBS := $(shell $(EXPR) $(NUM_CORES) / 2)
  ifeq ($(hotspot_JTREG_JOBS), 0)
    hotspot_JTREG_JOBS := 1
  else ifeq ($(shell $(EXPR) $(hotspot_JTREG_JOBS) \> 12), 1)
    hotspot_JTREG_JOBS := 12
  endif
endif

# Hotspot tests manage their own heap and assertions; both components ship
# native test libraries in the test image.
hotspot_JTREG_MAX_MEM := 0
hotspot_JTREG_ASSERT := false
hotspot_JTREG_NATIVEPATH := $(TEST_IMAGE_DIR)/hotspot/jtreg/native
jdk_JTREG_NATIVEPATH := $(TEST_IMAGE_DIR)/jdk/jtreg/native
|
86 |
|
87 |
|
################################################################################
# Parse test selection
#
# The user has given a test selection in the TEST variable. We must parse it
# and determine what that means in terms of actual calls to the test framework.
#
# The parse functions take as argument a test specification as given by the
# user, and returns a fully qualified test descriptor if it was a match, or
# nothing if not. A single test specification can result in multiple test
# descriptors being returned. A valid test descriptor must always be accepted
# and returned identically.
################################################################################

# Helper function to determine if a test specification is a Gtest test
#
# It is a Gtest test if it is either "gtest", or "gtest:" followed by an
# optional test filter string. Bare "gtest" and "gtest:" both normalize to
# the canonical descriptor "gtest:all".
define ParseGtestTestSelection
  $(if $(filter gtest%, $1), \
    $(if $(filter gtest, $1), \
      gtest:all \
    , \
      $(if $(filter gtest:, $1), \
        gtest:all \
      , \
        $1 \
      ) \
    ) \
  )
endef
|
118 |
|
# Helper function to determine if a test specification is a Jtreg test
#
# It is a Jtreg test if it optionally begins with jtreg:, and then is either
# an unspecified group name (possibly prefixed by :), or a group in a
# specified <component>/test directory, or a path to a test or test directory,
# either absolute or relative to TOPDIR.
define ParseJtregTestSelection
  $(eval TEST_NAME := $(strip $(patsubst jtreg:%, %, $1))) \
  $(if $(or $(findstring :, $(TEST_NAME)), $(findstring /, $(TEST_NAME))), , \
    $(eval TEST_NAME := :$(TEST_NAME)) \
  ) \
  $(if $(findstring :, $(TEST_NAME)), \
    $(if $(filter :%, $(TEST_NAME)), \
      $(foreach component, $(JTREG_COMPONENTS), \
        $(if $(filter $(patsubst :%, %, $(TEST_NAME)), \
            $($(component)_JTREG_TEST_GROUPS)), \
          jtreg:$(component)/test:$(patsubst :%,%,$(TEST_NAME)) \
        ) \
      ) \
    , \
      $(eval COMPONENT := $(word 1, $(subst /, $(SPACE), $(TEST_NAME)))) \
      $(eval GROUP := $(word 2, $(subst :, $(SPACE), $(TEST_NAME)))) \
      $(if $(filter $(COMPONENT), $(JTREG_COMPONENTS)), \
        $(if $(filter $(GROUP), $($(COMPONENT)_JTREG_TEST_GROUPS)), \
          jtreg:$(TEST_NAME) \
        ) \
      ) \
    ) \
  , \
    $(if $(filter /%, $(TEST_NAME)), \
      $(if $(wildcard $(TEST_NAME)), \
        jtreg:$(TEST_NAME) \
      ) \
    , \
      $(if $(wildcard $(TOPDIR)/$(TEST_NAME)), \
        jtreg:$(TEST_NAME) \
      ) \
    ) \
  )
endef
|
159 |
|
ifeq ($(TEST), )
  $(info No test selection given in TEST!)
  $(info Please use e.g. 'run-test TEST=tier1' or 'run-test-tier1')
  $(error Cannot continue)
endif

# Now intelligently convert the test selection given by the user in TEST
# into a list of fully qualified test descriptors of the tests to run.
# Each parser is tried in turn (custom, gtest, jtreg); the first one that
# produces a non-empty result wins for that specification.
TESTS_TO_RUN :=
$(foreach test, $(TEST), \
  $(eval PARSED_TESTS := $(call ParseCustomTestSelection, $(test))) \
  $(if $(strip $(PARSED_TESTS)), , \
    $(eval PARSED_TESTS += $(call ParseGtestTestSelection, $(test))) \
  ) \
  $(if $(strip $(PARSED_TESTS)), , \
    $(eval PARSED_TESTS += $(call ParseJtregTestSelection, $(test))) \
  ) \
  $(if $(strip $(PARSED_TESTS)), , \
    $(eval UNKNOWN_TEST := $(test)) \
  ) \
  $(eval TESTS_TO_RUN += $(PARSED_TESTS)) \
)

ifneq ($(UNKNOWN_TEST), )
  $(info Unknown test selection: '$(UNKNOWN_TEST)')
  $(error Cannot continue)
endif

TESTS_TO_RUN := $(strip $(TESTS_TO_RUN))


# Present the result of our parsing to the user
$(info Test selection '$(TEST)', will run:)
$(foreach test, $(TESTS_TO_RUN), $(info * $(test)))
|
194 |
|
195 |
|
################################################################################
# Functions for setting up rules for running the selected tests
#
# The SetupRun*Test functions all have the same interface:
#
# Parameter 1 is the name of the rule. This is the test id, based on the test
# descriptor, and this is also used as variable prefix, and the targets
# generated are listed in a variable by that name.
#
# Remaining parameters are named arguments. Currently this is only:
#   TEST -- The properly formatted fully qualified test descriptor
#
# After the rule named by the test id has been executed, the following
# variables will be available:
# testid_TOTAL - the total number of tests run
# testid_PASSED - the number of successful tests
# testid_FAILED - the number of failed tests
# testid_ERROR - the number of tests was neither successful or failed
#
################################################################################

### Rules for Gtest

SetupRunGtestTest = $(NamedParamsMacroTemplate)
define SetupRunGtestTestBody
  $1_TEST_RESULTS_DIR := $$(TEST_RESULTS_DIR)/$1
  $1_TEST_SUPPORT_DIR := $$(TEST_SUPPORT_DIR)/$1

  # "gtest:all" means no filter; anything else becomes a --gtest_filter prefix.
  $1_TEST_NAME := $$(strip $$(patsubst gtest:%, %, $$($1_TEST)))
  ifneq ($$($1_TEST_NAME), all)
    $1_GTEST_FILTER := --gtest_filter=$$($1_TEST_NAME)*
  endif

  ifneq ($$(GTEST_REPEAT), )
    $1_GTEST_REPEAT := --gtest_repeat=$$(GTEST_REPEAT)
  endif

  run-test-$1:
	$$(call LogWarn)
	$$(call LogWarn, Running test '$$($1_TEST)')
	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/gtest, \
	    $$(FIXPATH) $$(TEST_IMAGE_DIR)/hotspot/gtest/server/gtestLauncher \
	    -jdk $(JDK_IMAGE_DIR) $$($1_GTEST_FILTER) \
	    --gtest_output=xml:$$($1_TEST_RESULTS_DIR)/gtest.xml \
	    $$($1_GTEST_REPEAT) $$(GTEST_OPTIONS) \
	    > >($(TEE) $$($1_TEST_RESULTS_DIR)/gtest.txt) || true )

  $1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/gtest.txt

  # Scrape the gtest console log for the summary counters.
  # NOTE(review): the "[  PASSED  ]"/"[  FAILED  ]" banners in gtest output use
  # two spaces; the extraction had collapsed whitespace — restored, confirm.
  parse-test-$1: run-test-$1
	$$(call LogWarn, Finished running test '$$($1_TEST)')
	$$(call LogWarn, Test report is stored in $$(strip \
	    $$(subst $$(TOPDIR)/, , $$($1_TEST_RESULTS_DIR))))
	$$(eval $1_TOTAL := $$(shell $$(AWK) '/==========.* tests? from .* \
	    test cases? ran/ { print $$$$2 }' $$($1_RESULT_FILE)))
	$$(eval $1_PASSED := $$(shell $$(AWK) '/\[  PASSED  \] .* tests?./ \
	    { print $$$$4 }' $$($1_RESULT_FILE)))
	$$(eval $1_FAILED := $$(shell $$(AWK) '/\[  FAILED  \] .* tests?, \
	    listed below/ { print $$$$4 }' $$($1_RESULT_FILE)))
	$$(if $$($1_FAILED), , $$(eval $1_FAILED := 0))
	$$(eval $1_ERROR := $$(shell \
	    $$(EXPR) $$($1_TOTAL) - $$($1_PASSED) - $$($1_FAILED)))

  $1: run-test-$1 parse-test-$1

  TARGETS += $1
endef
|
264 |
|
################################################################################

### Rules for Jtreg

# Helper function for SetupRunJtregTest. Set a JTREG_* variable from, in order:
# 1) Specified by user on command line
# 2) Component-specific default
# 3) Generic default
#
# Note: No spaces are allowed around the arguments.
# Arg $1 The test ID (i.e. $1 in SetupRunJtregTest)
# Arg $2 Base variable, e.g. JTREG_JOBS
# Arg $3 The default value (optional)
define SetJtregValue
  ifneq ($$($2), )
    $1_$2 := $$($2)
  else
    ifneq ($$($$($1_COMPONENT)_$2), )
      $1_$2 := $$($$($1_COMPONENT)_$2)
    else
      ifneq ($3, )
        $1_$2 := $3
      endif
    endif
  endif
endef
|
291 |
|
SetupRunJtregTest = $(NamedParamsMacroTemplate)
define SetupRunJtregTestBody
  $1_TEST_RESULTS_DIR := $$(TEST_RESULTS_DIR)/$1
  $1_TEST_SUPPORT_DIR := $$(TEST_SUPPORT_DIR)/$1

  $1_TEST_NAME := $$(strip $$(patsubst jtreg:%, %, $$($1_TEST)))
  $1_COMPONENT := $$(firstword $$(subst /, $$(SPACE), $$($1_TEST_NAME)))

  # Unfortunately, we need different defaults for some JTREG values,
  # depending on what component we're running.

  # Convert JTREG_foo into $1_JTREG_foo with a suitable value.
  $$(eval $$(call SetJtregValue,$1,JTREG_TEST_MODE,agentvm))
  $$(eval $$(call SetJtregValue,$1,JTREG_ASSERT,true))
  $$(eval $$(call SetJtregValue,$1,JTREG_MAX_MEM,512m))
  $$(eval $$(call SetJtregValue,$1,JTREG_NATIVEPATH))
  $$(eval $$(call SetJtregValue,$1,JTREG_BASIC_OPTIONS))

  ifneq ($(TEST_JOBS), 0)
    # User has specified TEST_JOBS, use that as fallback default
    $$(eval $$(call SetJtregValue,$1,JTREG_JOBS,$$(TEST_JOBS)))
  else
    # Use JOBS as default (except for hotspot)
    $$(eval $$(call SetJtregValue,$1,JTREG_JOBS,$$(JOBS)))
  endif

  ifeq ($$(shell $$(EXPR) $$($1_JTREG_JOBS) \> 50), 1)
    # Until CODETOOLS-7901892 is fixed, JTreg cannot handle more than 50 jobs
    $1_JTREG_JOBS := 50
  endif

  # Make sure MaxRAMFraction is high enough to not cause OOM or swapping since
  # we may end up with a lot of JVM's
  $1_JTREG_MAX_RAM_FRACTION := $$(shell $$(EXPR) $$($1_JTREG_JOBS) \* 4)

  JTREG_TIMEOUT ?= 4
  JTREG_VERBOSE ?= fail,error,summary
  JTREG_RETAIN ?= fail,error

  ifneq ($$($1_JTREG_MAX_MEM), 0)
    $1_JTREG_BASIC_OPTIONS += -vmoption:-Xmx$$($1_JTREG_MAX_MEM)
    $1_JTREG_LAUNCHER_OPTIONS += -Xmx$$($1_JTREG_MAX_MEM)
  endif

  $1_JTREG_BASIC_OPTIONS += -$$($1_JTREG_TEST_MODE) \
      -verbose:$$(JTREG_VERBOSE) -retain:$$(JTREG_RETAIN) \
      -concurrency:$$($1_JTREG_JOBS) -timeoutFactor:$$(JTREG_TIMEOUT) \
      -vmoption:-XX:MaxRAMFraction=$$($1_JTREG_MAX_RAM_FRACTION)

  $1_JTREG_BASIC_OPTIONS += -automatic -keywords:\!ignore -ignore:quiet

  # Some tests needs to find a boot JDK using the JDK8_HOME variable.
  $1_JTREG_BASIC_OPTIONS += -e:JDK8_HOME=$$(BOOT_JDK)

  $1_JTREG_BASIC_OPTIONS += \
      $$(addprefix -javaoption:, $$(JTREG_JAVA_OPTIONS)) \
      $$(addprefix -vmoption:, $$(JTREG_VM_OPTIONS)) \
      #

  ifeq ($$($1_JTREG_ASSERT), true)
    $1_JTREG_BASIC_OPTIONS += -ea -esa
  endif

  ifneq ($$($1_JTREG_NATIVEPATH), )
    $1_JTREG_BASIC_OPTIONS += -nativepath:$$($1_JTREG_NATIVEPATH)
  endif

  run-test-$1:
	$$(call LogWarn)
	$$(call LogWarn, Running test '$$($1_TEST)')
	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/jtreg, \
	    $$(JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
	    -Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
	    $$($1_JTREG_BASIC_OPTIONS) \
	    -testjdk:$$(JDK_IMAGE_DIR) \
	    -dir:$$(TOPDIR) \
	    -reportDir:$$($1_TEST_RESULTS_DIR) \
	    -workDir:$$($1_TEST_SUPPORT_DIR) \
	    $$(JTREG_OPTIONS) \
	    $$($1_TEST_NAME) || true )

  $1_RESULT_FILE := $$($1_TEST_RESULTS_DIR)/text/stats.txt

  # Scrape the jtreg stats file for "passed:", "failed:" and "error:" counters.
  parse-test-$1: run-test-$1
	$$(call LogWarn, Finished running test '$$($1_TEST)')
	$$(call LogWarn, Test report is stored in $$(strip \
	    $$(subst $$(TOPDIR)/, , $$($1_TEST_RESULTS_DIR))))
	$$(eval $1_PASSED := $$(shell $$(AWK) '{ gsub(/[,;]/, ""); \
	    for (i=1; i<=NF; i++) { if ($$$$i == "passed:") \
	    print $$$$(i+1) } }' $$($1_RESULT_FILE)))
	$$(if $$($1_PASSED), , $$(eval $1_PASSED := 0))
	$$(eval $1_FAILED := $$(shell $$(AWK) '{gsub(/[,;]/, ""); \
	    for (i=1; i<=NF; i++) { if ($$$$i == "failed:") \
	    print $$$$(i+1) } }' $$($1_RESULT_FILE)))
	$$(if $$($1_FAILED), , $$(eval $1_FAILED := 0))
	$$(eval $1_ERROR := $$(shell $$(AWK) '{gsub(/[,;]/, ""); \
	    for (i=1; i<=NF; i++) { if ($$$$i == "error:") \
	    print $$$$(i+1) } }' $$($1_RESULT_FILE)))
	$$(if $$($1_ERROR), , $$(eval $1_ERROR := 0))
	$$(eval $1_TOTAL := $$(shell \
	    $$(EXPR) $$($1_PASSED) + $$($1_FAILED) + $$($1_ERROR)))

  $1: run-test-$1 parse-test-$1

  TARGETS += $1
endef
|
399 |
|
400 |
|
################################################################################
# Setup and execute make rules for all selected tests
################################################################################

# Helper function to determine which handler to use for the given test
UseGtestTestHandler = \
    $(if $(filter gtest:%, $1), true)

UseJtregTestHandler = \
    $(if $(filter jtreg:%, $1), true)

# Now process each test to run and setup a proper make rule.
# The test id is the descriptor with every character outside [a-zA-Z0-9]
# squeezed into underscores, so it is usable as a make variable prefix.
$(foreach test, $(TESTS_TO_RUN), \
  $(eval TEST_ID := $(shell $(ECHO) $(strip $(test)) | \
      $(TR) -cs '[a-z][A-Z][0-9]\n' '_')) \
  $(eval ALL_TEST_IDS += $(TEST_ID)) \
  $(if $(call UseCustomTestHandler, $(test)), \
    $(eval $(call SetupRunCustomTest, $(TEST_ID), \
        TEST := $(test), \
    )) \
  ) \
  $(if $(call UseGtestTestHandler, $(test)), \
    $(eval $(call SetupRunGtestTest, $(TEST_ID), \
        TEST := $(test), \
    )) \
  ) \
  $(if $(call UseJtregTestHandler, $(test)), \
    $(eval $(call SetupRunJtregTest, $(TEST_ID), \
        TEST := $(test), \
    )) \
  ) \
)

# Sort also removes duplicates, so if there is any we'll get fewer words.
ifneq ($(words $(ALL_TEST_IDS)), $(words $(sort $(ALL_TEST_IDS))))
  $(error Duplicate test specification)
endif
|
438 |
|
439 |
|
################################################################################
# The main target for RunTests.gmk
################################################################################

# The SetupRun*Test functions have populated TARGETS.

TEST_FAILURE := false

run-test: $(TARGETS)
	# Print a table of the result of all tests run and their result
	$(ECHO)
	$(ECHO) ==============================
	$(ECHO) Test summary
	$(ECHO) ==============================
	$(PRINTF) "%2s %-49s %5s %5s %5s %5s %2s\n" " " TEST \
	    TOTAL PASS FAIL ERROR " "
	$(foreach test, $(TESTS_TO_RUN), \
	  $(eval TEST_ID := $(shell $(ECHO) $(strip $(test)) | \
	      $(TR) -cs '[a-z][A-Z][0-9]\n' '_')) \
	  $(if $(filter $($(TEST_ID)_PASSED), $($(TEST_ID)_TOTAL)), \
	    $(PRINTF) "%2s %-49s %5d %5d %5d %5d %2s\n" " " "$(test)" \
	        $($(TEST_ID)_TOTAL) $($(TEST_ID)_PASSED) $($(TEST_ID)_FAILED) \
	        $($(TEST_ID)_ERROR) " " $(NEWLINE) \
	  , \
	    $(PRINTF) "%2s %-49s %5d %5d %5d %5d %2s\n" ">>" "$(test)" \
	        $($(TEST_ID)_TOTAL) $($(TEST_ID)_PASSED) $($(TEST_ID)_FAILED) \
	        $($(TEST_ID)_ERROR) "<<" $(NEWLINE) \
	    $(eval TEST_FAILURE := true) \
	  ) \
	)
	$(ECHO) ==============================
	$(if $(filter true, $(TEST_FAILURE)), \
	  $(ECHO) TEST FAILURE $(NEWLINE) \
	  $(TOUCH) $(MAKESUPPORT_OUTPUTDIR)/exit-with-error \
	, \
	  $(ECHO) TEST SUCCESS \
	)
	$(ECHO)
|
478 |
|
################################################################################

all: run-test

.PHONY: default all run-test $(TARGETS)