1 """
2 mbed SDK
3 Copyright (c) 2011-2014 ARM Limited
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9     http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
18 """
19
20 import os
21 import re
22 import sys
23 import json
24 import uuid
25 import pprint
26 import random
27 import optparse
28 import datetime
29 import threading
30 from types import ListType
31 from colorama import Fore, Back, Style
32 from prettytable import PrettyTable
33
34 from time import sleep, time
35 from Queue import Queue, Empty
36 from os.path import join, exists, basename
37 from threading import Thread, Lock
38 from subprocess import Popen, PIPE
39
40 # Imports related to mbed build api
41 from workspace_tools.tests import TESTS
42 from workspace_tools.tests import TEST_MAP
43 from workspace_tools.paths import BUILD_DIR
44 from workspace_tools.paths import HOST_TESTS
45 from workspace_tools.utils import ToolException
46 from workspace_tools.utils import construct_enum
47 from workspace_tools.targets import TARGET_MAP
48 from workspace_tools.test_db import BaseDBAccess
49 from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
50 from workspace_tools.build_api import get_target_supported_toolchains
51 from workspace_tools.build_api import write_build_report
52 from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
53 from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
54 from workspace_tools.test_exporters import ReportExporter, ResultExporterType
55
56
57 import workspace_tools.host_tests.host_tests_plugins as host_tests_plugins
58
59 try:
60     import mbed_lstools
61 except ImportError:
62     pass
63
64
65 class ProcessObserver(Thread):
66     def __init__(self, proc):
67         Thread.__init__(self)
68         self.proc = proc
69         self.queue = Queue()
70         self.daemon = True
71         self.active = True
72         self.start()
73
74     def run(self):
75         while self.active:
76             c = self.proc.stdout.read(1)
77             self.queue.put(c)
78
79     def stop(self):
80         self.active = False
81         try:
82             self.proc.terminate()
83         except Exception, _:
84             pass
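
# Usage sketch for ProcessObserver (this mirrors how run_host_test() below uses it;
# the command shown here is hypothetical):
#
#     proc = Popen(["python", "host_test.py"], stdout=PIPE)
#     obs = ProcessObserver(proc)                   # starts reading proc's stdout in a daemon thread
#     c = obs.queue.get(block=True, timeout=0.5)    # consume the captured output one character at a time
#     obs.stop()                                    # stops the reader and terminates the process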
85
86
87 class SingleTestExecutor(threading.Thread):
88     """ Runs a single test suite (a SingleTestRunner instance) in a separate thread
89     """
90     def __init__(self, single_test):
91         self.single_test = single_test
92         threading.Thread.__init__(self)
93
94     def run(self):
95         start = time()
96         # Execute tests depending on options and filter applied
97         test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
98         elapsed_time = time() - start
99
100         # Human readable summary
101         if not self.single_test.opts_suppress_summary:
102             # prints well-formed summary with results (SQL table like)
103             print self.single_test.generate_test_summary(test_summary, shuffle_seed)
104         if self.single_test.opts_test_x_toolchain_summary:
105             # prints well-formed summary with results (SQL table like)
106             # table shows text x toolchain test result matrix
107             print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
108         print "Completed in %.2f sec"% (elapsed_time)
109
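# Minimal usage sketch (not part of this module's public API; the construction of
# 'single_test' is abbreviated and the muts/test_spec variables are hypothetical,
# typically loaded from JSON files): the runner defined below is wrapped in a
# SingleTestExecutor so the whole suite executes in a background thread:
#
#     single_test = SingleTestRunner(_muts=muts, _test_spec=test_spec)
#     executor = SingleTestExecutor(single_test)
#     executor.start()
#     executor.join()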
110
111 class SingleTestRunner(object):
112     """ Object wrapper for single test run which may involve multiple MUTs
113     """
114     RE_DETECT_TESTCASE_RESULT = None
115
116     # Return codes for test script
117     TEST_RESULT_OK = "OK"
118     TEST_RESULT_FAIL = "FAIL"
119     TEST_RESULT_ERROR = "ERROR"
120     TEST_RESULT_UNDEF = "UNDEF"
121     TEST_RESULT_IOERR_COPY = "IOERR_COPY"
122     TEST_RESULT_IOERR_DISK = "IOERR_DISK"
123     TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
124     TEST_RESULT_TIMEOUT = "TIMEOUT"
125     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
126     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
127
128     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
129     TEST_LOOPS_LIST = []    # Here we redefine the number of loops per test_id
130     TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
131
132     muts = {} # MUTs descriptor (from external file)
133     test_spec = {} # Test specification (from external file)
134
135     # mbed test suite -> SingleTestRunner
136     TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
137                            "failure" : TEST_RESULT_FAIL,
138                            "error" : TEST_RESULT_ERROR,
139                            "ioerr_copy" : TEST_RESULT_IOERR_COPY,
140                            "ioerr_disk" : TEST_RESULT_IOERR_DISK,
141                            "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
142                            "timeout" : TEST_RESULT_TIMEOUT,
143                            "no_image" : TEST_RESULT_NO_IMAGE,
144                            "end" : TEST_RESULT_UNDEF,
145                            "mbed_assert" : TEST_RESULT_MBED_ASSERT
146     }
147
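    # Sketch of the convention behind the mapping above (inferred from run_host_test()
    # further down): the device under test prints its verdict as a brace-wrapped token
    # such as "{success}" or "{failure}", followed by "{end}". RE_DETECT_TESTCASE_RESULT,
    # compiled in __init__(), extracts the token and TEST_RESULT_MAPPING translates it
    # into one of the TEST_RESULT_* codes, e.g.:
    #
    #     m = self.RE_DETECT_TESTCASE_RESULT.search("{success}")
    #     self.TEST_RESULT_MAPPING[m.groups()[0]]    # -> TEST_RESULT_OK ("OK")
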
148     def __init__(self,
149                  _global_loops_count=1,
150                  _test_loops_list=None,
151                  _muts={},
152                  _clean=False,
153                  _opts_db_url=None,
154                  _opts_log_file_name=None,
155                  _opts_report_html_file_name=None,
156                  _opts_report_junit_file_name=None,
157                  _opts_report_build_file_name=None,
158                  _test_spec={},
159                  _opts_goanna_for_mbed_sdk=None,
160                  _opts_goanna_for_tests=None,
161                  _opts_shuffle_test_order=False,
162                  _opts_shuffle_test_seed=None,
163                  _opts_test_by_names=None,
164                  _opts_peripheral_by_names=None,
165                  _opts_test_only_peripheral=False,
166                  _opts_test_only_common=False,
167                  _opts_verbose_skipped_tests=False,
168                  _opts_verbose_test_result_only=False,
169                  _opts_verbose=False,
170                  _opts_firmware_global_name=None,
171                  _opts_only_build_tests=False,
172                  _opts_parallel_test_exec=False,
173                  _opts_suppress_summary=False,
174                  _opts_test_x_toolchain_summary=False,
175                  _opts_copy_method=None,
176                  _opts_mut_reset_type=None,
177                  _opts_jobs=None,
178                  _opts_waterfall_test=None,
179                  _opts_extend_test_timeout=None):
180         """ Let's try hard to init this object
181         """
182         from colorama import init
183         init()
184
185         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
186         self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
187         # Settings related to test loops counters
188         try:
189             _global_loops_count = int(_global_loops_count)
190         except:
191             _global_loops_count = 1
192         if _global_loops_count < 1:
193             _global_loops_count = 1
194         self.GLOBAL_LOOPS_COUNT = _global_loops_count
195         self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
196         self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
197
198         self.shuffle_random_seed = 0.0
199         self.SHUFFLE_SEED_ROUND = 10
200
201         # MUT list and test specification storage
202         self.muts = _muts
203         self.test_spec = _test_spec
204
205         # Settings passed e.g. from command line
206         self.opts_db_url = _opts_db_url
207         self.opts_log_file_name = _opts_log_file_name
208         self.opts_report_html_file_name = _opts_report_html_file_name
209         self.opts_report_junit_file_name = _opts_report_junit_file_name
210         self.opts_report_build_file_name = _opts_report_build_file_name
211         self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
212         self.opts_goanna_for_tests = _opts_goanna_for_tests
213         self.opts_shuffle_test_order = _opts_shuffle_test_order
214         self.opts_shuffle_test_seed = _opts_shuffle_test_seed
215         self.opts_test_by_names = _opts_test_by_names
216         self.opts_peripheral_by_names = _opts_peripheral_by_names
217         self.opts_test_only_peripheral = _opts_test_only_peripheral
218         self.opts_test_only_common = _opts_test_only_common
219         self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
220         self.opts_verbose_test_result_only = _opts_verbose_test_result_only
221         self.opts_verbose = _opts_verbose
222         self.opts_firmware_global_name = _opts_firmware_global_name
223         self.opts_only_build_tests = _opts_only_build_tests
224         self.opts_parallel_test_exec = _opts_parallel_test_exec
225         self.opts_suppress_summary = _opts_suppress_summary
226         self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
227         self.opts_copy_method = _opts_copy_method
228         self.opts_mut_reset_type = _opts_mut_reset_type
229         self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
230         self.opts_waterfall_test = _opts_waterfall_test
231         self.opts_extend_test_timeout = _opts_extend_test_timeout
232         self.opts_clean = _clean
233
234         # File / screen logger initialization
235         self.logger = CLITestLogger(file_name=self.opts_log_file_name)  # Default test logger
236
237         # Database related initializations
238         self.db_logger = factory_db_logger(self.opts_db_url)
239         self.db_logger_build_id = None # Build ID (database index of build_id table)
240         # Let's connect to database to set up credentials and confirm database is ready
241         if self.db_logger:
242             self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object
243             if self.db_logger.is_connected():
244                 # Get hostname and uname so we can use it as build description
245                 # when creating new build_id in external database
246                 (_hostname, _uname) = self.db_logger.get_hostname()
247                 _host_location = os.path.dirname(os.path.abspath(__file__))
248                 build_id_type = self.db_logger.BUILD_ID_TYPE_BUILD_ONLY if self.opts_only_build_tests else None
249                 self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
250                 self.db_logger.disconnect()
251
252     def dump_options(self):
253         """ Returns a data structure with the common settings passed to SingleTestRunner.
254             It can be used, for example, to fill the _extra field in the database storing test suite single-run data.
255             Example:
256             data = self.dump_options()
257             or
258             data_str = json.dumps(self.dump_options())
259         """
260         result = {"db_url" : str(self.opts_db_url),
261                   "log_file_name" :  str(self.opts_log_file_name),
262                   "shuffle_test_order" : str(self.opts_shuffle_test_order),
263                   "shuffle_test_seed" : str(self.opts_shuffle_test_seed),
264                   "test_by_names" :  str(self.opts_test_by_names),
265                   "peripheral_by_names" : str(self.opts_peripheral_by_names),
266                   "test_only_peripheral" :  str(self.opts_test_only_peripheral),
267                   "test_only_common" :  str(self.opts_test_only_common),
268                   "verbose" :  str(self.opts_verbose),
269                   "firmware_global_name" :  str(self.opts_firmware_global_name),
270                   "only_build_tests" :  str(self.opts_only_build_tests),
271                   "copy_method" :  str(self.opts_copy_method),
272                   "mut_reset_type" :  str(self.opts_mut_reset_type),
273                   "jobs" :  str(self.opts_jobs),
274                   "extend_test_timeout" :  str(self.opts_extend_test_timeout),
275                   "_dummy" : ''
276         }
277         return result
278
279     def shuffle_random_func(self):
280         return self.shuffle_random_seed
281
282     def is_shuffle_seed_float(self):
283         """ Returns True if the shuffle seed can be converted to float
284         """
285         result = True
286         try:
287             float(self.shuffle_random_seed)
288         except ValueError:
289             result = False
290         return result
291
292     # This will store target / toolchain specific properties
293     test_suite_properties_ext = {}  # target : toolchain
294     # Here we store test results
295     test_summary = []
296     # Here we store test results in extended data structure
297     test_summary_ext = {}
298     execute_thread_slice_lock = Lock()
299
300     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
301         for toolchain in toolchains:
302             # Toolchain specific build successes and failures
303             build_report[toolchain] = {
304                 "mbed_failure": False,
305                 "library_failure": False,
306                 "library_build_passing": [],
307                 "library_build_failing": [],
308                 "test_build_passing": [],
309                 "test_build_failing": []
310             }
311             # print target, toolchain
312             # Test suite properties returned to external tools like CI
313             test_suite_properties = {}
314             test_suite_properties['jobs'] = self.opts_jobs
315             test_suite_properties['clean'] = clean
316             test_suite_properties['target'] = target
317             test_suite_properties['test_ids'] = ', '.join(test_ids)
318             test_suite_properties['toolchain'] = toolchain
319             test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed
320
321
322             # print '=== %s::%s ===' % (target, toolchain)
323             # Let's build our test
324             if target not in TARGET_MAP:
325                 print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
326                 continue
327
328             T = TARGET_MAP[target]
329             build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
330             clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None
331
332             try:
333                 build_mbed_libs_result = build_mbed_libs(T,
334                                                          toolchain,
335                                                          options=build_mbed_libs_options,
336                                                          clean=clean_mbed_libs_options,
337                                                          jobs=self.opts_jobs)
338
339                 if not build_mbed_libs_result:
340                     print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
341                     continue
342             except ToolException:
343                 print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
344                 build_report[toolchain]["mbed_failure"] = True
345                 #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
346                 continue
347
348             build_dir = join(BUILD_DIR, "test", target, toolchain)
349
350             test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
351             test_suite_properties['build_dir'] = build_dir
352             test_suite_properties['skipped'] = []
353
354             # Enumerate through all tests and shuffle test order if requested
355             test_map_keys = sorted(TEST_MAP.keys())
356
357             if self.opts_shuffle_test_order:
358                 random.shuffle(test_map_keys, self.shuffle_random_func)
359                 # Update database with shuffle seed if applicable
360                 if self.db_logger:
361                     self.db_logger.reconnect();
362                     if self.db_logger.is_connected():
363                         self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
364                         self.db_logger.disconnect();
365
366             if self.db_logger:
367                 self.db_logger.reconnect();
368                 if self.db_logger.is_connected():
369                     # Update MUTs and Test Specification in database
370                     self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
371                     # Update Extra information in database (some options passed to test suite)
372                     self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
373                     self.db_logger.disconnect();
374
375             valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids)
376             skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
377
378             for skipped_test_id in skipped_test_map_keys:
379                 test_suite_properties['skipped'].append(skipped_test_id)
380
381
382             # First pass through all tests and determine which libraries need to be built
383             libraries = set()
384             for test_id in valid_test_map_keys:
385                 test = TEST_MAP[test_id]
386
387                 # Detect which lib should be added to test
388                 # Some libs have to be compiled, like RTOS or ETH
389                 for lib in LIBRARIES:
390                     if lib['build_dir'] in test.dependencies:
391                         libraries.add(lib['id'])
392
393
394             build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
395             clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
396
397             # Build all required libraries
398             for lib_id in libraries:
399                 try:
400                     build_lib(lib_id,
401                               T,
402                               toolchain,
403                               options=build_project_options,
404                               verbose=self.opts_verbose,
405                               clean=clean_mbed_libs_options,
406                               jobs=self.opts_jobs)
407
408                     build_report[toolchain]["library_build_passing"].append(lib_id)
409
410                 except ToolException:
411                     print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
412                     build_report[toolchain]["library_failure"] = True
413                     build_report[toolchain]["library_build_failing"].append(lib_id)
414                     #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
415                     continue
416
417
418
419
420             for test_id in valid_test_map_keys:
421                 test = TEST_MAP[test_id]
422
423                 test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
424
425                 # TODO: move the two loops below to a separate function
426                 INC_DIRS = []
427                 for lib_id in libraries:
428                     if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
429                         INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
430
431                 MACROS = []
432                 for lib_id in libraries:
433                     if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
434                         MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
435                 MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
436                 MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
437                 test_uuid = uuid.uuid4()
438                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
439
440                 project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
441                 try:
442                     path = build_project(test.source_dir,
443                                      join(build_dir, test_id),
444                                      T,
445                                      toolchain,
446                                      test.dependencies,
447                                      options=build_project_options,
448                                      clean=clean_project_options,
449                                      verbose=self.opts_verbose,
450                                      name=project_name,
451                                      macros=MACROS,
452                                      inc_dirs=INC_DIRS,
453                                      jobs=self.opts_jobs)
454                     build_report[toolchain]["test_build_passing"].append(test_id)
455
456                 except ToolException:
457                     project_name_str = project_name if project_name is not None else test_id
458                     print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
459                     build_report[toolchain]["test_build_failing"].append(test_id)
460                     # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
461                     continue
462
463                 if self.opts_only_build_tests:
464                     # With this option we are skipping testing phase
465                     continue
466
467                 # Test duration can be increased by global value
468                 test_duration = test.duration
469                 if self.opts_extend_test_timeout is not None:
470                     test_duration += self.opts_extend_test_timeout
471
472                 # For an automated test the duration acts as a timeout after
473                 # which the test gets interrupted
474                 test_spec = self.shape_test_request(target, path, test_id, test_duration)
475                 test_loops = self.get_test_loop_count(test_id)
476
477                 test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
478                 test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
479                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
480
481                 # read MUTs, test specification and perform tests
482                 single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
483
484                 # Append test results to global test summary
485                 if single_test_result is not None:
486                     self.test_summary.append(single_test_result)
487
488                 # Prepare extended test results data structure (it can be used to generate detailed test report)
489                 if toolchain not in self.test_summary_ext:
490                     self.test_summary_ext[toolchain] = {}  # test_summary_ext : toolchain
491                 if target not in self.test_summary_ext[toolchain]:
492                     self.test_summary_ext[toolchain][target] = {}    # test_summary_ext : toolchain : target
493                 if test_id not in self.test_summary_ext[toolchain][target]:
494                     self.test_summary_ext[toolchain][target][test_id] = detailed_test_results    # test_summary_ext : toolchain : target : test_id
495
496             test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
497             self.test_suite_properties_ext[target][toolchain] = test_suite_properties
498
499         # return self.test_summary, self.shuffle_random_seed, test_summary_ext, self.test_suite_properties_ext
500         q.put(target + '_'.join(toolchains))
501         return
502
503     def execute(self):
504         clean = self.test_spec.get('clean', False)
505         test_ids = self.test_spec.get('test_ids', [])
506         q = Queue()
507
508         # Generate a seed for shuffle if one is not provided in the options
509         self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
510         if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
511             self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
512
513         build_reports = []
514
515         if self.opts_parallel_test_exec:
516             ###################################################################
517             # Experimental, parallel test execution per singletest instance.
518             ###################################################################
519             execute_threads = []    # Threads used to build mbed SDK, libs, test cases and execute tests
520             # Note: We are building here in parallel for each target separately!
521             # So we are not building the same thing multiple times and compilers
522             # in separate threads do not collide.
523             # Inside the execute_thread_slice() function, handle() will be called to
524             # get information about available MUTs (per target).
525             for target, toolchains in self.test_spec['targets'].iteritems():
526                 self.test_suite_properties_ext[target] = {}
527                 cur_build_report = {}
528                 t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, cur_build_report))
529                 build_reports.append({ "target": target, "report": cur_build_report})
530                 t.daemon = True
531                 t.start()
532                 execute_threads.append(t)
533
534             for t in execute_threads:
535                 q.get() # used instead of t.join(), because threads may finish in any order
536         else:
537             # Serialized (not parallel) test execution
538             for target, toolchains in self.test_spec['targets'].iteritems():
539                 if target not in self.test_suite_properties_ext:
540                     self.test_suite_properties_ext[target] = {}
541
542                 cur_build_report = {}
543                 self.execute_thread_slice(q, target, toolchains, clean, test_ids, cur_build_report)
544                 build_reports.append({ "target": target, "report": cur_build_report})
545                 q.get()
546
547         build_report = []
548
549         for target_build_report in build_reports:
550             cur_report = {
551                 "target": target_build_report["target"],
552                 "passing": [],
553                 "failing": []
554             }
555
556             for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get):
557                 print "%s - %s" % (target_build_report["target"], toolchain)
558                 report = target_build_report["report"][toolchain]
559
560                 if report["mbed_failure"]:
561                     cur_report["failing"].append({
562                         "toolchain": toolchain,
563                         "project": "mbed library"
564                     })
565                 else:
566                     for failing_library in report["library_build_failing"]:
567                         cur_report["failing"].append({
568                             "toolchain": toolchain,
569                             "project": "Library::%s" % (failing_library)
570                         })
571
572                     for passing_library in report["library_build_passing"]:
573                         cur_report["passing"].append({
574                             "toolchain": toolchain,
575                             "project": "Library::%s" % (passing_library)
576                         })
577
578                     for passing_test in report["test_build_passing"]:
579                         cur_report["passing"].append({
580                             "toolchain": toolchain,
581                             "project": "Test::%s" % (passing_test)
582                         })
583
584                     for failing_test in report["test_build_failing"]:
585                         cur_report["failing"].append({
586                             "toolchain": toolchain,
587                             "project": "Test::%s" % (failing_test)
588                         })
589
590
591             build_report.append(cur_report)
592
593         if self.db_logger:
594             self.db_logger.reconnect();
595             if self.db_logger.is_connected():
596                 self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
597                 self.db_logger.disconnect();
598
599         return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, build_report
600
601     def get_valid_tests(self, test_map_keys, target, toolchain, test_ids):
602         valid_test_map_keys = []
603
604         for test_id in test_map_keys:
605             test = TEST_MAP[test_id]
606             if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
607                 continue
608
609             if test_ids and test_id not in test_ids:
610                 continue
611
612             if self.opts_test_only_peripheral and not test.peripherals:
613                 if self.opts_verbose_skipped_tests:
614                     print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
615                 continue
616
617             if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names.split(',')]):
618                 # We will skip tests not forced with -p option
619                 if self.opts_verbose_skipped_tests:
620                     print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
621                 continue
622
623             if self.opts_test_only_common and test.peripherals:
624                 if self.opts_verbose_skipped_tests:
625                     print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
626                 continue
627
628             if test.automated and test.is_supported(target, toolchain):
629                 if test.peripherals is None and self.opts_only_build_tests:
630                     # When users use the 'build only' flag and the test does not have
631                     # specified peripherals, we allow building the test by default
632                     pass
633                 elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names.split(','):
634                     # If we force peripheral with option -p we expect test
635                     # to pass even if peripheral is not in MUTs file.
636                     pass
637                 elif not self.is_peripherals_available(target, test.peripherals):
638                     if self.opts_verbose_skipped_tests:
639                         if test.peripherals:
640                             print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
641                         else:
642                             print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
643                     continue
644
645                 # The test has made it through all the filters, so add it to the valid tests list
646                 valid_test_map_keys.append(test_id)
647
648         return valid_test_map_keys
649
650     def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
651         # NOTE: This will not preserve order
652         return list(set(all_test_map_keys) - set(valid_test_map_keys))
653
654     def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
655         """ Prints well-formed summary with results (SQL table like)
656             table shows text x toolchain test result matrix
657         """
658         RESULT_INDEX = 0
659         TARGET_INDEX = 1
660         TOOLCHAIN_INDEX = 2
661         TEST_INDEX = 3
662         DESC_INDEX = 4
663
664         unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
665         unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
666         unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
667         unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
668
669         result = "Test summary:\n"
670         for target in unique_targets:
671             result_dict = {} # test : { toolchain : result }
672             unique_target_toolchains = []
673             for test in test_summary:
674                 if test[TARGET_INDEX] == target:
675                     if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
676                         unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
677                     if test[TEST_INDEX] not in result_dict:
678                         result_dict[test[TEST_INDEX]] = {}
679                     result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
680
681             pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
682             pt = PrettyTable(pt_cols)
683             for col in pt_cols:
684                 pt.align[col] = "l"
685             pt.padding_width = 1 # One space between column edges and contents (default)
686
687             for test in unique_tests:
688                 if test in result_dict:
689                     test_results = result_dict[test]
690                     if test in unique_test_desc:
691                         row = [target, test, unique_test_desc[test]]
692                         for toolchain in unique_target_toolchains:
693                             # Keep the row width equal to the number of table columns
694                             row.append(test_results[toolchain] if toolchain in test_results else '')
695                         pt.add_row(row)
696             result += pt.get_string()
697             shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
698                                                        shuffle_seed if shuffle_seed else self.shuffle_random_seed)
699             result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
700         return result
701
702     def generate_test_summary(self, test_summary, shuffle_seed=None):
703         """ Prints well-formed summary with results (SQL table like)
704             table lists all results for each target / toolchain / test combination
705         """
706         result = "Test summary:\n"
707         # Pretty table package is used to print results
708         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
709                           "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
710         pt.align["Result"] = "l" # Left align
711         pt.align["Target"] = "l" # Left align
712         pt.align["Toolchain"] = "l" # Left align
713         pt.align["Test ID"] = "l" # Left align
714         pt.align["Test Description"] = "l" # Left align
715         pt.padding_width = 1 # One space between column edges and contents (default)
716
717         result_dict = {self.TEST_RESULT_OK : 0,
718                        self.TEST_RESULT_FAIL : 0,
719                        self.TEST_RESULT_ERROR : 0,
720                        self.TEST_RESULT_UNDEF : 0,
721                        self.TEST_RESULT_IOERR_COPY : 0,
722                        self.TEST_RESULT_IOERR_DISK : 0,
723                        self.TEST_RESULT_IOERR_SERIAL : 0,
724                        self.TEST_RESULT_NO_IMAGE : 0,
725                        self.TEST_RESULT_TIMEOUT : 0,
726                        self.TEST_RESULT_MBED_ASSERT : 0
727         }
728
729         for test in test_summary:
730             if test[0] in result_dict:
731                 result_dict[test[0]] += 1
732             pt.add_row(test)
733         result += pt.get_string()
734         result += "\n"
735
736         # Print result count
737         result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
738         shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
739                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed)
740         result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
741         return result
742
743     def test_loop_list_to_dict(self, test_loops_str):
744         """ Transforms a 'test_id=N,test_id=N,...' string into a dictionary {test_id : loop_count}
745         """
746         result = {}
747         if test_loops_str:
748             test_loops = test_loops_str.split(',')
749             for test_loop in test_loops:
750                 test_loop_count = test_loop.split('=')
751                 if len(test_loop_count) == 2:
752                     _test_id, _test_loops = test_loop_count
753                     try:
754                         _test_loops = int(_test_loops)
755                     except:
756                         continue
757                     result[_test_id] = _test_loops
758         return result
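
    # A small illustration of the transformation above (the test ids are hypothetical):
    #
    #     self.test_loop_list_to_dict("MBED_A1=2,MBED_A2=10")
    #     # -> {'MBED_A1': 2, 'MBED_A2': 10}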
759
760     def get_test_loop_count(self, test_id):
761         """ Returns the number of loops for a test (selected by test_id).
762             If the test is not in the list of redefined loop counts, the default value is used.
763         """
764         result = self.GLOBAL_LOOPS_COUNT
765         if test_id in self.TEST_LOOPS_DICT:
766             result = self.TEST_LOOPS_DICT[test_id]
767         return result
768
769     def delete_file(self, file_path):
770         """ Remove file from the system
771         """
772         result = True
773         result_msg = ""
774         try:
775             os.remove(file_path)
776         except Exception, e:
777             result_msg = e
778             result = False
779         return result, result_msg
780
781     def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
782         """ Function determines MUT's mbed disk/port and copies binary to
783             target.
784             The test is invoked afterwards.
785         """
786         data = json.loads(test_spec)
787         # Get test information, image and test timeout
788         test_id = data['test_id']
789         test = TEST_MAP[test_id]
790         test_description = TEST_MAP[test_id].get_description()
791         image = data["image"]
792         duration = data.get("duration", 10)
793
794         # Find a suitable MUT:
795         mut = None
796         for id, m in self.muts.iteritems():
797             if m['mcu'] == data['mcu']:
798                 mut = m
799                 break
800
801         if mut is None:
802             print "Error: No Mbed available: MUT[%s]" % data['mcu']
803             return None
804
805         disk = mut.get('disk')
806         port = mut.get('port')
807
808         if disk is None or port is None:
809             return None
810
811         target_by_mcu = TARGET_MAP[mut['mcu']]
812         # Some extra stuff can be declared in MUTs structure
813         reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
814         reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
815         image_dest = mut.get('image_dest')  # Image file destination DISK + IMAGE_DEST + BINARY_NAME
816         images_config = mut.get('images_config')    # Available images selection via config file
817         mobo_config = mut.get('mobo_config')        # Available board configuration selection e.g. core selection etc.
818         copy_method = mut.get('copy_method')        # Copy method used to flash the binary onto the MUT
819
820         # When the build and test system were separate, this was relative to a
821         # network base folder: join(NETWORK_BASE_PATH, )
822         image_path = image
823
824         if self.db_logger:
825             self.db_logger.reconnect()
826
827         selected_copy_method = self.opts_copy_method if copy_method is None else copy_method
828
829         # Tests can be looped so test results must be stored for the same test
830         test_all_result = []
831         # Test results for one test run several times
832         detailed_test_results = {}  # { Loop_number: { results ... } }
833
834         for test_index in range(test_loops):
835             # Host test execution
836             start_host_exec_time = time()
837
838             single_test_result = self.TEST_RESULT_UNDEF # single test run result
839             _copy_method = selected_copy_method
840
841             if not exists(image_path):
842                 single_test_result = self.TEST_RESULT_NO_IMAGE
843                 elapsed_time = 0
844                 single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
845                 print single_test_output
846             else:
847                 # Host test execution
848                 start_host_exec_time = time()
849
850                 host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
851                 host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
852                 host_test_result = self.run_host_test(test.host_test,
853                                                       image_path, disk, port, duration,
854                                                       micro=target_name,
855                                                       verbose=host_test_verbose,
856                                                       reset=host_test_reset,
857                                                       reset_tout=reset_tout,
858                                                       copy_method=selected_copy_method,
859                                                       program_cycle_s=target_by_mcu.program_cycle_s())
860                 single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
861
862             # Store test result
863             test_all_result.append(single_test_result)
864             total_elapsed_time = time() - start_host_exec_time   # Test time with copy (flashing) / reset
865             elapsed_time = single_testduration  # Time of single test case execution after reset
866
867             detailed_test_results[test_index] = {
868                 'single_test_result' : single_test_result,
869                 'single_test_output' : single_test_output,
870                 'target_name' : target_name,
871                 'toolchain_name' : toolchain_name,
872                 'test_id' : test_id,
873                 'test_description' : test_description,
874                 'elapsed_time' : round(elapsed_time, 2),
875                 'duration' : single_timeout,
876                 'copy_method' : _copy_method,
877             }
878
879             print self.print_test_result(single_test_result, target_name, toolchain_name,
880                                          test_id, test_description, elapsed_time, single_timeout)
881
882             # Update database entries for ongoing test
883             if self.db_logger and self.db_logger.is_connected():
884                 test_type = 'SingleTest'
885                 self.db_logger.insert_test_entry(self.db_logger_build_id,
886                                                  target_name,
887                                                  toolchain_name,
888                                                  test_type,
889                                                  test_id,
890                                                  single_test_result,
891                                                  single_test_output,
892                                                  elapsed_time,
893                                                  single_timeout,
894                                                  test_index)
895
896             # In waterfall mode we repeat the test until the first OK result, then stop testing
897             if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
898                 break
899
900         if self.db_logger:
901             self.db_logger.disconnect()
902
903         return (self.shape_global_test_loop_result(test_all_result),
904                 target_name,
905                 toolchain_name,
906                 test_id,
907                 test_description,
908                 round(elapsed_time, 2),
909                 single_timeout,
910                 self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
911
912     def print_test_result(self, test_result, target_name, toolchain_name,
913                           test_id, test_description, elapsed_time, duration):
914         """ Use specific convention to print test result and related data
915         """
916         tokens = []
917         tokens.append("TargetTest")
918         tokens.append(target_name)
919         tokens.append(toolchain_name)
920         tokens.append(test_id)
921         tokens.append(test_description)
922         separator = "::"
923         time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
924         result = separator.join(tokens) + " [" + test_result +"]" + time_info
925         return Fore.MAGENTA + result + Fore.RESET
926
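    # Example of a line produced by print_test_result() (target, toolchain and test
    # names are hypothetical; the colorama colouring is omitted):
    #
    #     TargetTest::LPC1768::ARM::MBED_A1::Basic [OK] in 1.25 of 20 sec
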
927     def shape_test_loop_ok_result_count(self, test_all_result):
928         """ Reformats a list of results into an 'OK count/total count' string
929         """
930         test_loop_count = len(test_all_result)
931         test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
932         return "%d/%d"% (test_loop_ok_result, test_loop_count)
933
934     def shape_global_test_loop_result(self, test_all_result):
935         """ Reduces a list of loop results to a single result (FAIL unless all results are equal)
936         """
937         result = self.TEST_RESULT_FAIL
938         if all(test_all_result[0] == res for res in test_all_result):
939             result = test_all_result[0]
940         return result
941
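    # Sketch of how the two helpers above summarize looped runs (the result list is
    # made up for the example):
    #
    #     results = [self.TEST_RESULT_OK, self.TEST_RESULT_OK, self.TEST_RESULT_FAIL]
    #     self.shape_test_loop_ok_result_count(results)    # -> "2/3"
    #     self.shape_global_test_loop_result(results)      # -> TEST_RESULT_FAIL (results differ)
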
942     def run_host_test(self, name, image_path, disk, port, duration,
943                       micro=None, reset=None, reset_tout=None,
944                       verbose=False, copy_method=None, program_cycle_s=None):
945         """ Creates a new process running the host test configured for a particular test case.
946             The function also polls the process output for serial port activity to catch all data
947             printed by the test runner and the host test during test execution
948         """
949
950         def get_char_from_queue(obs):
951             """ Gets a character from the queue in a safe way (returns None on timeout)
952             """
953             try:
954                 c = obs.queue.get(block=True, timeout=0.5)
955             except Empty, _:
956                 c = None
957             return c
958
959         def filter_queue_char(c):
960             """ Filters out non-ASCII characters read from the serial port
961             """
962             if ord(c) not in range(128):
963                 c = ' '
964             return c
965
966         def get_test_result(output):
967             """ Parse test 'output' data
968             """
969             result = self.TEST_RESULT_TIMEOUT
970             for line in "".join(output).splitlines():
971                 search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
972                 if search_result and len(search_result.groups()):
973                     result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
974                     break
975             return result
976
977         def get_auto_property_value(property_name, line):
978             """ Scans an auto-detection line from the MUT and returns the value of parameter 'property_name'
979                 Returns string
980             """
981             result = None
982             if re.search("HOST: Property '%s'"% property_name, line) is not None:
983                 property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
984                 if property is not None and len(property.groups()) == 1:
985                     result = property.groups()[0]
986             return result
987
988         # print "{%s} port:%s disk:%s"  % (name, port, disk),
989         cmd = ["python",
990                '%s.py'% name,
991                '-d', disk,
992                '-f', '"%s"'% image_path,
993                '-p', port,
994                '-t', str(duration),
995                '-C', str(program_cycle_s)]
996
997         # Add extra parameters to host_test
998         if copy_method is not None:
999             cmd += ["-c", copy_method]
1000         if micro is not None:
1001             cmd += ["-m", micro]
1002         if reset is not None:
1003             cmd += ["-r", reset]
1004         if reset_tout is not None:
1005             cmd += ["-R", str(reset_tout)]
1006
1007         if verbose:
1008             print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
1009             print "Test::Output::Start"
1010
1011         proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
1012         obs = ProcessObserver(proc)
1013         update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
1014         line = ''
1015         output = []
1016         start_time = time()
1017         while (time() - start_time) < (2 * duration):
1018             c = get_char_from_queue(obs)
1019             if c:
1020                 if verbose:
1021                     sys.stdout.write(c)
1022                 c = filter_queue_char(c)
1023                 output.append(c)
1024                 # Give the mbed under test a way to communicate the end of the test
1025                 if c in ['\n', '\r']:
1026
1027                     # Checking for auto-detection information from the test about MUT reset moment
1028                     if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
1029                         # We will update this marker only once to prevent resetting the timer multiple times
1030                         update_once_flag['reset_target'] = True
1031                         start_time = time()
1032
1033                     # Checking for auto-detection information from the test about timeout
1034                     auto_timeout_val = get_auto_property_value('timeout', line)
1035                     if 'timeout' not in update_once_flag and auto_timeout_val is not None:
1036                         # We will update this marker only once to prevent multiple timeout updates
1037                         update_once_flag['timeout'] = True
1038                         duration = int(auto_timeout_val)
1039
1040                     # Detect mbed assert:
1041                     if 'mbed assertation failed: ' in line:
1042                         output.append('{{mbed_assert}}')
1043                         break
1044
1045                     # Check for test end
1046                     if '{end}' in line:
1047                         break
1048                     line = ''
1049                 else:
1050                     line += c
1051         end_time = time()
1052         testcase_duration = end_time - start_time   # Test case duration from reset to {end}
1053
1054         c = get_char_from_queue(obs)
1055
1056         if c:
1057             if verbose:
1058                 sys.stdout.write(c)
1059             c = filter_queue_char(c)
1060             output.append(c)
1061
1062         if verbose:
1063             print "Test::Output::Finish"
1064         # Stop test process
1065         obs.stop()
1066
1067         result = get_test_result(output)
1068         return (result, "".join(output), testcase_duration, duration)
1069
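    # For reference, a command line assembled by run_host_test() has roughly the shape
    # sketched below; the host test script name comes from the test's 'host_test' field
    # and the process runs with HOST_TESTS as its working directory (all concrete values
    # shown here are hypothetical):
    #
    #     python echo.py -d E: -f "build/test/.../MBED_A1.bin" -p COM3 -t 20 -C 4 -c shell -m LPC1768
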
1070     def is_peripherals_available(self, target_mcu_name, peripherals=None):
1071         """ Checks if specified target should run specific peripheral test case defined in MUTs file
1072         """
1073         if peripherals is not None:
1074             peripherals = set(peripherals)
1075         for id, mut in self.muts.iteritems():
1076             # Target MCU name check
1077             if mut["mcu"] != target_mcu_name:
1078                 continue
1079             # Peripherals check
1080             if peripherals is not None:
1081                 if 'peripherals' not in mut:
1082                     continue
1083                 if not peripherals.issubset(set(mut['peripherals'])):
1084                     continue
1085             return True
1086         return False
1087
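    # Sketch of one entry of the MUTs structure consumed by handle() and
    # is_peripherals_available() above; handle() requires 'mcu', 'port' and 'disk',
    # the remaining keys are optional and every value shown here is hypothetical:
    #
    #     muts = {
    #         "1": {
    #             "mcu": "LPC1768",
    #             "port": "COM3",
    #             "disk": "E:",
    #             "peripherals": ["TMP102"],
    #             "copy_method": "shell",
    #         }
    #     }
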
1088     def shape_test_request(self, mcu, image_path, test_id, duration=10):
1089         """ Function prepares JSON structure describing test specification
1090         """
1091         test_spec = {
1092             "mcu": mcu,
1093             "image": image_path,
1094             "duration": duration,
1095             "test_id": test_id,
1096         }
1097         return json.dumps(test_spec)
1098
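    # The JSON string produced above (and parsed back by handle()) looks like this
    # sketch, with hypothetical values:
    #
    #     {"mcu": "LPC1768",
    #      "image": "build/test/LPC1768/ARM/MBED_A1/basic.bin",
    #      "duration": 20,
    #      "test_id": "MBED_A1"}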
1099
1100 def get_unique_value_from_summary(test_summary, index):
1101     """ Gets a sorted list of unique values from the given column of the test summary
1102     """
1103     result = []
1104     for test in test_summary:
1105         target_name = test[index]
1106         if target_name not in result:
1107             result.append(target_name)
1108     return sorted(result)
1109
1110
1111 def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
1112     """ Builds a dictionary mapping each unique value of column index_key to its column index_val value
1113     """
1114     result = {}
1115     for test in test_summary:
1116         key = test[index_key]
1117         val = test[index_val]
1118         if key not in result:
1119             result[key] = val
1120     return result
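
# For orientation: each row of 'test_summary' passed to the two helpers above is the
# tuple returned by SingleTestRunner.handle(), i.e.
# (result, target, toolchain, test_id, description, elapsed_time, timeout, loops),
# so for example get_unique_value_from_summary(test_summary, 1) returns the sorted
# unique target names.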
1121
1122
1123 def show_json_file_format_error(json_spec_filename, line, column):
1124     """ Prints the broken JSON file content around the reported error position
1125     """
1126     with open(json_spec_filename) as data_file:
1127         line_no = 1
1128         for json_line in data_file:
1129             if line_no + 5 >= line: # Print last few lines before error
1130                 print 'Line %d:\t'%line_no + json_line, # Prints line
1131             if line_no == line:
1132                 print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
1133                 break
1134             line_no += 1
1135
1136
1137 def json_format_error_defect_pos(json_error_msg):
1138     """ Gets the first error line and column in a JSON file.
1139         Parsed from the message of the exception thrown by json.loads()
1140     """
1141     result = None
1142     line, column = 0, 0
1143     # Line value search
1144     line_search = re.search('line [0-9]+', json_error_msg)
1145     if line_search is not None:
1146         ls = line_search.group().split(' ')
1147         if len(ls) == 2:
1148             line = int(ls[1])
1149             # Column position search
1150             column_search = re.search('column [0-9]+', json_error_msg)
1151             if column_search is not None:
1152                 cs = column_search.group().split(' ')
1153                 if len(cs) == 2:
1154                     column = int(cs[1])
1155                     result = [line, column]
1156     return result
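# Illustrative sketch (not part of the original module); the message below
# mimics the text of a ValueError raised by json.loads().
#
#   msg = "Expecting property name: line 7 column 5 (char 143)"
#   json_format_error_defect_pos(msg)   # -> [7, 5]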
1157
1158
1159 def get_json_data_from_file(json_spec_filename, verbose=False):
1160     """ Loads a JSON-formatted file into a Python data structure
1161     """
1162     result = None
1163     try:
1164         with open(json_spec_filename) as data_file:
1165             try:
1166                 result = json.load(data_file)
1167             except ValueError as json_error_msg:
1168                 result = None
1169                 print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
1170                 # We can print where the error occurred inside the JSON file if we can parse the exception message
1171                 json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
1172                 if json_format_defect_pos is not None:
1173                     line = json_format_defect_pos[0]
1174                     column = json_format_defect_pos[1]
1175                     print
1176                     show_json_file_format_error(json_spec_filename, line, column)
1177
1178     except IOError as fileopen_error_msg:
1179         print 'JSON file %s could not be opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
1180         print
1181     if verbose and result:
1182         pp = pprint.PrettyPrinter(indent=4)
1183         pp.pprint(result)
1184     return result
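# Illustrative usage sketch (not part of the original module); 'muts_all.json'
# is only an example file name.
#
#   muts = get_json_data_from_file('muts_all.json', verbose=True)
#   if muts is None:
#       print "Loading MUTs specification failed"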
1185
1186
1187 def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
1188     """ Prints the MUTs configuration passed to the test script (verbose output)
1189     """
1190     muts_info_cols = []
1191     # We need to check all unique properties for each defined MUT
1192     for k in json_data:
1193         mut_info = json_data[k]
1194         for mut_property in mut_info:
1195             if mut_property not in muts_info_cols:
1196                 muts_info_cols.append(mut_property)
1197
1198     # Prepare pretty table object to display all MUTs
1199     pt_cols = ["index"] + muts_info_cols
1200     pt = PrettyTable(pt_cols)
1201     for col in pt_cols:
1202         pt.align[col] = "l"
1203
1204     # Add rows to pretty print object
1205     for k in json_data:
1206         row = [k]
1207         mut_info = json_data[k]
1208
1209         add_row = True
1210         if platform_filter and 'mcu' in mut_info:
1211             add_row = re.search(platform_filter, mut_info['mcu']) is not None
1212         if add_row:
1213             for col in muts_info_cols:
1214                 cell_val = mut_info[col] if col in mut_info else None
1215                 if type(cell_val) == ListType:
1216                     cell_val = join_delim.join(cell_val)
1217                 row.append(cell_val)
1218             pt.add_row(row)
1219     return pt.get_string()
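# Illustrative sketch of the expected MUTs dictionary shape (not part of the
# original module); all values are made-up examples.
#
#   muts = {
#       "1": {"mcu": "LPC1768", "port": "COM4", "disk": "E:", "peripherals": ["SD"]},
#       "2": {"mcu": "KL25Z",   "port": "COM7", "disk": "F:", "peripherals": []}
#   }
#   print print_muts_configuration_from_json(muts, platform_filter='LPC')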
1220
1221
1222 def print_test_configuration_from_json(json_data, join_delim=", "):
1223     """ Prints the test specification configuration passed to the test script (verbose output)
1224     """
1225     toolchains_info_cols = []
1226     # We need to check all toolchains for each device
1227     for k in json_data:
1228         # k should be 'targets'
1229         targets = json_data[k]
1230         for target in targets:
1231             toolchains = targets[target]
1232             for toolchain in toolchains:
1233                 if toolchain not in toolchains_info_cols:
1234                     toolchains_info_cols.append(toolchain)
1235
1236     # Prepare pretty table object to display test specification
1237     pt_cols = ["mcu"] + sorted(toolchains_info_cols)
1238     pt = PrettyTable(pt_cols)
1239     for col in pt_cols:
1240         pt.align[col] = "l"
1241
1242     # { target : [conflicted toolchains] }
1243     toolchain_conflicts = {}
1244     toolchain_path_conflicts = []
1245     for k in json_data:
1246         # k should be 'targets'
1247         targets = json_data[k]
1248         for target in targets:
1249             target_supported_toolchains = get_target_supported_toolchains(target)
1250             if not target_supported_toolchains:
1251                 target_supported_toolchains = []
1252             target_name = target if target in TARGET_MAP else "%s*"% target
1253             row = [target_name]
1254             toolchains = targets[target]
1255
1256             for toolchain in sorted(toolchains_info_cols):
1257                 # Check for conflicts: target vs toolchain
1258                 conflict = False
1259                 conflict_path = False
1260                 if toolchain in toolchains:
1261                     if toolchain not in target_supported_toolchains:
1262                         conflict = True
1263                         if target not in toolchain_conflicts:
1264                             toolchain_conflicts[target] = []
1265                         toolchain_conflicts[target].append(toolchain)
1266                 # Add marker inside table about target usage / conflict
1267                 cell_val = 'Yes' if toolchain in toolchains else '-'
1268                 if conflict:
1269                     cell_val += '*'
1270                 # Check for conflicts: toolchain vs toolchain path
1271                 if toolchain in TOOLCHAIN_BIN_PATH:
1272                     toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
1273                     if not os.path.isdir(toolchain_path):
1274                         conflict_path = True
1275                         if toolchain not in toolchain_path_conflicts:
1276                             toolchain_path_conflicts.append(toolchain)
1277                 if conflict_path:
1278                     cell_val += '#'
1279                 row.append(cell_val)
1280             pt.add_row(row)
1281
1282     # generate result string
1283     result = pt.get_string()    # Test specification table
1284     if toolchain_conflicts or toolchain_path_conflicts:
1285         result += "\n"
1286         result += "Toolchain conflicts:\n"
1287         for target in toolchain_conflicts:
1288             if target not in TARGET_MAP:
1289                 result += "\t* Target %s unknown\n"% (target)
1290             conflict_target_list = join_delim.join(toolchain_conflicts[target])
1291             suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
1292             result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
1293
1294         for toolchain in toolchain_path_conflicts:
1295             # Let's check the toolchain configuration
1296             if toolchain in TOOLCHAIN_BIN_PATH:
1297                 toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
1298                 if not os.path.isdir(toolchain_path):
1299                     result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
1300     return result
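# Illustrative sketch of the expected test_spec dictionary shape (not part of
# the original module); the target/toolchain lists are made-up examples.
#
#   test_spec = {
#       "targets": {
#           "LPC1768": ["ARM", "GCC_ARM"],
#           "KL25Z": ["GCC_ARM"]
#       }
#   }
#   print print_test_configuration_from_json(test_spec)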
1301
1302
1303 def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',', platform_filter=None):
1304     """ Generates a summary table of all test cases and their additional
1305         information using pretty-print functionality, so the test suite user
1306         can review the available test cases
1307     """
1308     # get all unique test ID prefixes
1309     unique_test_id = []
1310     for test in TESTS:
1311         split = test['id'].split('_')[:-1]
1312         test_id_prefix = '_'.join(split)
1313         if test_id_prefix not in unique_test_id:
1314             unique_test_id.append(test_id_prefix)
1315     unique_test_id.sort()
1316     counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
1317     counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
1318
1319     test_properties = ['id',
1320                        'automated',
1321                        'description',
1322                        'peripherals',
1323                        'host_test',
1324                        'duration'] if cols is None else cols
1325
1326     # All tests status table print
1327     pt = PrettyTable(test_properties)
1328     for col in test_properties:
1329         pt.align[col] = "l"
1330     pt.align['duration'] = "r"
1331
1332     counter_all = 0
1333     counter_automated = 0
1334     pt.padding_width = 1 # One space between column edges and contents (default)
1335
1336     for test_id in sorted(TEST_MAP.keys()):
1337         if platform_filter is not None:
1338             # Filter out platforms using regex
1339             if re.search(platform_filter, test_id) is None:
1340                 continue
1341         row = []
1342         test = TEST_MAP[test_id]
1343         split = test_id.split('_')[:-1]
1344         test_id_prefix = '_'.join(split)
1345
1346         for col in test_properties:
1347             col_value = test[col]
1348             if type(test[col]) == ListType:
1349                 col_value = join_delim.join(test[col])
1350             elif test[col] is None:
1351                 col_value = "-"
1352
1353             row.append(col_value)
1354         if test['automated']:
1355             counter_dict_test_id_types[test_id_prefix] += 1
1356             counter_automated += 1
1357         pt.add_row(row)
1358         # Update counters
1359         counter_all += 1
1360         counter_dict_test_id_types_all[test_id_prefix] += 1
1361     result = pt.get_string()
1362     result += "\n\n"
1363
1364     if result_summary and not platform_filter:
1365         # Automation result summary
1366         test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
1367         pt = PrettyTable(test_id_cols)
1368         pt.align['automated'] = "r"
1369         pt.align['all'] = "r"
1370         pt.align['percent [%]'] = "r"
1371
1372         percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
1373         str_progress = progress_bar(percent_progress, 75)
1374         pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
1375         result += "Automation coverage:\n"
1376         result += pt.get_string()
1377         result += "\n\n"
1378
1379         # Test automation coverage table print
1380         test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
1381         pt = PrettyTable(test_id_cols)
1382         pt.align['id'] = "l"
1383         pt.align['automated'] = "r"
1384         pt.align['all'] = "r"
1385         pt.align['percent [%]'] = "r"
1386         for unique_id in unique_test_id:
1387             # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
1388             percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
1389             str_progress = progress_bar(percent_progress, 75)
1390             row = [unique_id,
1391                    counter_dict_test_id_types[unique_id],
1392                    counter_dict_test_id_types_all[unique_id],
1393                    percent_progress,
1394                    "[" + str_progress + "]"]
1395             pt.add_row(row)
1396         result += "Test automation coverage:\n"
1397         result += pt.get_string()
1398         result += "\n\n"
1399     return result
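# Illustrative usage sketch (not part of the original module); the filter value
# is a made-up example of a regex applied to test ids.
#
#   print get_avail_tests_summary_table()                         # full table + coverage summary
#   print get_avail_tests_summary_table(platform_filter='MBED_A')  # regex filter on test ids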
1400
1401
1402 def progress_bar(percent_progress, saturation=0):
1403     """ Creates a text progress bar with an optional saturation marker
1404     """
1405     step = int(percent_progress / 2)    # Scale percentage to bar width (0 - 50 characters)
1406     str_progress = '#' * step + '.' * int(50 - step)
1407     c = '!' if str_progress[38] == '.' else '|'
1408     if saturation > 0:
1409         saturation = saturation / 2
1410         str_progress = str_progress[:saturation] + c + str_progress[saturation:]
1411     return str_progress
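# Illustrative sketch (not part of the original module): a 42% bar with the
# saturation marker requested at the 75% position.
#
#   bar = progress_bar(42.0, saturation=75)
#   # 21 '#' characters followed by dots, with the saturation marker
#   # ('!' or '|') inserted at roughly three quarters of the bar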
1412
1413
1414 def singletest_in_cli_mode(single_test):
1415     """ Runs SingleTestRunner object in CLI (Command line interface) mode
1416     """
1417     start = time()
1418     # Execute tests depending on options and filter applied
1419     test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report = single_test.execute()
1420     elapsed_time = time() - start
1421
1422     # Human readable summary
1423     if not single_test.opts_suppress_summary:
1424         # prints well-formed summary with results (SQL table like)
1425         print single_test.generate_test_summary(test_summary, shuffle_seed)
1426     if single_test.opts_test_x_toolchain_summary:
1427         # prints well-formed summary with results (SQL table like)
1428         # table shows test x toolchain test result matrix
1429         print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
1430     print "Completed in %.2f sec"% (elapsed_time)
1431
1432     # Store extra reports in files
1433     if single_test.opts_report_html_file_name:
1434         # Export results in the form of an HTML report to a separate file
1435         report_exporter = ReportExporter(ResultExporterType.HTML)
1436         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
1437     if single_test.opts_report_junit_file_name:
1438         # Export results in the form of a JUnit XML report to a separate file
1439         report_exporter = ReportExporter(ResultExporterType.JUNIT)
1440         report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
1441     if single_test.opts_report_build_file_name:
1442         # Export build results as an HTML report to a separate file
1443         write_build_report(build_report, 'tests_build/report.html', single_test.opts_report_build_file_name)
1444
1445
1446 class TestLogger():
1447     """ Base class for logging and printing ongoing events during a test suite pass
1448     """
1449     def __init__(self, store_log=True):
1450         """ We can control whether the logger actually stores the log in memory
1451             or just handles all log entries immediately
1452         """
1453         self.log = []
1454         self.log_to_file = False
1455         self.log_file_name = None
1456         self.store_log = store_log
1457
1458         self.LogType = construct_enum(INFO='Info',
1459                                       WARN='Warning',
1460                                       NOTIF='Notification',
1461                                       ERROR='Error',
1462                                       EXCEPT='Exception')
1463
1464         self.LogToFileAttr = construct_enum(CREATE=1,    # Create or overwrite existing log file
1465                                             APPEND=2)    # Append to existing log file
1466
1467     def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
1468         """ Log one line of text
1469         """
1470         log_timestamp = time()
1471         log_entry = {'log_type' : LogType,
1472                      'log_timestamp' : log_timestamp,
1473                      'log_line' : log_line,
1474                      '_future' : None
1475         }
1476         # Store log in memory
1477         if self.store_log:
1478             self.log.append(log_entry)
1479         return log_entry
1480
1481
1482 class CLITestLogger(TestLogger):
1483     """ Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
1484     """
1485     def __init__(self, store_log=True, file_name=None):
1486         TestLogger.__init__(self)
1487         self.log_file_name = file_name
1488         #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
1489         self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
1490
1491     def log_print(self, log_entry, timestamp=True):
1492         """ Returns a formatted log entry string (with optional timestamp) for on-screen printing
1493         """
1494         ts = log_entry['log_timestamp']
1495         timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
1496         log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
1497         return timestamp_str + log_line_str
1498
1499     def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
1500         """ Logs a line; if a log file was specified the log line is also appended
1501             to the end of that file
1502         """
1503         log_entry = TestLogger.log_line(self, LogType, log_line)
1504         log_line_str = self.log_print(log_entry, timestamp)
1505         if self.log_file_name is not None:
1506             try:
1507                 with open(self.log_file_name, 'a') as f:
1508                     f.write(log_line_str + line_delim)
1509             except IOError:
1510                 pass
1511         return log_line_str
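# Illustrative usage sketch (not part of the original module);
# 'single_test.log' and the messages are made-up example values.
#
#   logger = CLITestLogger(store_log=True, file_name='single_test.log')
#   print logger.log_line(logger.LogType.INFO, 'Test suite started')
#   print logger.log_line(logger.LogType.ERROR, 'MUT not responding')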
1512
1513
1514 def factory_db_logger(db_url):
1515     """ Creates a database access driver based on the database type given in the connection string db_url
1516     """
1517     if db_url is not None:
1518         from workspace_tools.test_mysql import MySQLDBAccess
1519         connection_info = BaseDBAccess().parse_db_connection_string(db_url)
1520         if connection_info is not None:
1521             (db_type, username, password, host, db_name) = connection_info
1522             if db_type == 'mysql':
1523                 return MySQLDBAccess()
1524     return None
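# Illustrative usage sketch (not part of the original module); the connection
# string mirrors the example given for the --db option further below.
#
#   db = factory_db_logger('mysql://username:password@127.0.0.1/db_name')
#   if db is not None:
#       db.connect('127.0.0.1', 'username', 'password', 'db_name')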
1525
1526
1527 def detect_database_verbose(db_url):
1528     """ Runs the database detection sequence in verbose mode (with prints) to check if the database connection string is valid
1529     """
1530     result = BaseDBAccess().parse_db_connection_string(db_url)
1531     if result is not None:
1532         # Parsing passed
1533         (db_type, username, password, host, db_name) = result
1534         #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
1535         # Let's try to connect
1536         db_ = factory_db_logger(db_url)
1537         if db_ is not None:
1538             print "Connecting to database '%s'..."% db_url,
1539             db_.connect(host, username, password, db_name)
1540             if db_.is_connected():
1541                 print "ok"
1542                 print "Detecting database..."
1543                 print db_.detect_database(verbose=True)
1544                 print "Disconnecting...",
1545                 db_.disconnect()
1546                 print "done"
1547         else:
1548             print "Database type '%s' unknown"% db_type
1549     else:
1550         print "Parse error: '%s' - DB Url error"% (db_url)
1551
1552
1553 def get_module_avail(module_name):
1554     """ Returns True if module_name has already been imported
1555     """
1556     return module_name in sys.modules
1557
1558
1559 def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
1560     """ Detects all mbed-enabled devices connected to the host and generates an artificial MUTs dictionary.
1561         If the function fails to auto-detect devices it returns an empty dictionary.
1562
1563         if get_module_avail('mbed_lstools'):
1564             mbeds = mbed_lstools.create()
1565             mbeds_list = mbeds.list_mbeds()
1566
1567         @param mbeds_list list of mbeds captured from mbed_lstools
1568         @param platform_name_filter optional list of platform names used to filter the detected devices by 'platform_name'
1569     """
1570     result = {}   # Should be in muts_all.json format
1571     # Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
1572     # mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
1573     index = 1
1574     for mut in mbeds_list:
1575         m = {'mcu' : mut['platform_name'],
1576              'port' : mut['serial_port'],
1577              'disk' : mut['mount_point'],
1578              'peripherals' : []     # No peripheral detection
1579              }
1580         # 'index' is a new key on every iteration, so the MUT entry
1581         # can be assigned directly
1582         result[index] = m
1583         index += 1
1584     return result
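# Illustrative sketch of the resulting MUTs dictionary (not part of the
# original module); the device shown mirrors the example mbed_lstools entry
# in the comment above.
#
#   muts = get_autodetected_MUTS(mbeds.list_mbeds())
#   # e.g. {1: {'mcu': 'NUCLEO_F302R8', 'port': u'COM34', 'disk': 'E:', 'peripherals': []}}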
1585
1586
1587 def get_autodetected_TEST_SPEC(mbeds_list,
1588                                use_default_toolchain=True,
1589                                use_supported_toolchains=False,
1590                                toolchain_filter=None,
1591                                platform_name_filter=None):
1592     """ Detects all mbed-enabled devices connected to the host and generates an artificial test_spec structure.
1593         If the function fails to auto-detect devices it returns an empty 'targets' test_spec description.
1594
1595         use_default_toolchain - if True add default toolchain to test_spec
1596         use_supported_toolchains - if True add all supported toolchains to test_spec
1597         toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
1598     """
1599     result = {'targets': {} }
1600
1601     for mut in mbeds_list:
1602         mcu = mut['platform_name']
1603         if platform_name_filter is None or mcu in platform_name_filter:
1604             if mcu in TARGET_MAP:
1605                 default_toolchain = TARGET_MAP[mcu].default_toolchain
1606                 supported_toolchains = TARGET_MAP[mcu].supported_toolchains
1607
1608                 # Decide which toolchains should be added to test specification toolchain pool for each target
1609                 toolchains = []
1610                 if use_default_toolchain:
1611                     toolchains.append(default_toolchain)
1612                 if use_supported_toolchains:
1613                     toolchains += supported_toolchains
1614                 if toolchain_filter is not None:
1615                     all_toolchains = supported_toolchains + [default_toolchain]
1616                     for toolchain in toolchain_filter.split(','):
1617                         if toolchain in all_toolchains:
1618                             toolchains.append(toolchain)
1619
1620                 result['targets'][mcu] = list(set(toolchains))
1621     return result
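# Illustrative usage sketch (not part of the original module); the resulting
# toolchain set depends on TARGET_MAP and is only an example here.
#
#   test_spec = get_autodetected_TEST_SPEC(mbeds.list_mbeds(),
#                                          use_default_toolchain=True,
#                                          use_supported_toolchains=False,
#                                          toolchain_filter='ARM,GCC_ARM')
#   # e.g. {'targets': {'NUCLEO_F302R8': ['ARM', 'GCC_ARM']}}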
1622
1623
1624 def get_default_test_options_parser():
1625     """ Get common test script options used by CLI, web services etc.
1626     """
1627     parser = optparse.OptionParser()
1628     parser.add_option('-i', '--tests',
1629                       dest='test_spec_filename',
1630                       metavar="FILE",
1631                       help='Points to file with test specification')
1632
1633     parser.add_option('-M', '--MUTS',
1634                       dest='muts_spec_filename',
1635                       metavar="FILE",
1636                       help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
1637
1638     parser.add_option("-j", "--jobs",
1639                       dest='jobs',
1640                       metavar="NUMBER",
1641                       type="int",
1642                       help="Define number of compilation jobs. Default value is 1")
1643
1644     if get_module_avail('mbed_lstools'):
1645         # Additional features available when mbed_lstools is installed on host and imported
1646         # mbed_lstools allows users to detect mbed-enabled devices connected to the host
1647         parser.add_option('', '--auto',
1648                           dest='auto_detect',
1649                           metavar=False,
1650                           action="store_true",
1651                           help='Use mbed-ls module to detect all connected mbed devices')
1652
1653         parser.add_option('', '--tc',
1654                           dest='toolchains_filter',
1655                       help="Toolchain filter for --auto option. Use toolchain names separated by commas, 'default' or 'all' to select toolchains")
1656
1657     parser.add_option('', '--clean',
1658                       dest='clean',
1659                       metavar=False,
1660                       action="store_true",
1661                       help='Clean the build directory')
1662
1663     parser.add_option('-P', '--only-peripherals',
1664                       dest='test_only_peripheral',
1665                       default=False,
1666                       action="store_true",
1667                       help='Test only peripherals declared for the MUT and skip common tests')
1668
1669     parser.add_option('-C', '--only-commons',
1670                       dest='test_only_common',
1671                       default=False,
1672                       action="store_true",
1673                       help='Test only board internals. Skip peripheral tests and perform common tests.')
1674
1675     parser.add_option('-n', '--test-by-names',
1676                       dest='test_by_names',
1677                       help='Runs only tests enumerated in this switch. Use commas to separate test case names.')
1678
1679     parser.add_option('-p', '--peripheral-by-names',
1680                       dest='peripheral_by_names',
1681                       help='Forces discovery of particular peripherals. Use comma to separate peripheral names.')
1682
1683     copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
1684     copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
1685
1686     parser.add_option('-c', '--copy-method',
1687                       dest='copy_method',
1688                       help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
1689
1690     reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
1691     reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
1692
1693     parser.add_option('-r', '--reset-type',
1694                       dest='mut_reset_type',
1695                       default=None,
1696                       help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
1697
1698     parser.add_option('-g', '--goanna-for-tests',
1699                       dest='goanna_for_tests',
1700                       metavar=False,
1701                       action="store_true",
1702                       help='Run the Goanna static analysis tool for tests (the project will be rebuilt)')
1703
1704     parser.add_option('-G', '--goanna-for-sdk',
1705                       dest='goanna_for_mbed_sdk',
1706                       metavar=False,
1707                       action="store_true",
1708                       help='Run the Goanna static analysis tool for the mbed SDK (the project will be rebuilt)')
1709
1710     parser.add_option('-s', '--suppress-summary',
1711                       dest='suppress_summary',
1712                       default=False,
1713                       action="store_true",
1714                       help='Suppresses display of the well-formatted table with test results')
1715
1716     parser.add_option('-t', '--test-summary',
1717                       dest='test_x_toolchain_summary',
1718                       default=False,
1719                       action="store_true",
1720                       help='Displays a well-formatted table with test x toolchain results per target')
1721
1722     parser.add_option('-A', '--test-automation-report',
1723                       dest='test_automation_report',
1724                       default=False,
1725                       action="store_true",
1726                       help='Prints information about all tests and exits')
1727
1728     parser.add_option('-R', '--test-case-report',
1729                       dest='test_case_report',
1730                       default=False,
1731                       action="store_true",
1732                       help='Prints information about all test cases and exits')
1733
1734     parser.add_option("-S", "--supported-toolchains",
1735                       action="store_true",
1736                       dest="supported_toolchains",
1737                       default=False,
1738                       help="Displays supported matrix of MCUs and toolchains")
1739
1740     parser.add_option("-O", "--only-build",
1741                       action="store_true",
1742                       dest="only_build_tests",
1743                       default=False,
1744                       help="Only build tests, skips actual test procedures (flashing etc.)")
1745
1746     parser.add_option('', '--parallel',
1747                       dest='parallel_test_exec',
1748                       default=False,
1749                       action="store_true",
1750                       help='Experimental: executes test runners in parallel for MUTs connected to your host (speeds up test result collection)')
1751
1752     parser.add_option('', '--config',
1753                       dest='verbose_test_configuration_only',
1754                       default=False,
1755                       action="store_true",
1756                       help='Displays full test specification and MUTs configuration and exits')
1757
1758     parser.add_option('', '--loops',
1759                       dest='test_loops_list',
1760                       help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
1761
1762     parser.add_option('', '--global-loops',
1763                       dest='test_global_loops_value',
1764                       help='Set global number of test loops per test. Default value is 1')
1765
1766     parser.add_option('-W', '--waterfall',
1767                       dest='waterfall_test',
1768                       default=False,
1769                       action="store_true",
1770                       help='Used with --loops or --global-loops options. Repeats a test until an OK result occurs and then assumes the test passed.')
1771
1772     parser.add_option('-N', '--firmware-name',
1773                       dest='firmware_global_name',
1774                       help='Set global name for all produced projects. Note: the proper file extension will be added by the build scripts.')
1775
1776     parser.add_option('-u', '--shuffle',
1777                       dest='shuffle_test_order',
1778                       default=False,
1779                       action="store_true",
1780                       help='Shuffles test execution order')
1781
1782     parser.add_option('', '--shuffle-seed',
1783                       dest='shuffle_test_seed',
1784                       default=None,
1785                       help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
1786
1787     parser.add_option('-f', '--filter',
1788                       dest='general_filter_regex',
1789                       default=None,
1790                       help='For some commands you can use this regex filter to filter out results')
1791
1792     parser.add_option('', '--inc-timeout',
1793                       dest='extend_test_timeout',
1794                       metavar="NUMBER",
1795                       type="int",
1796                       help='You can increase global timeout for each test by specifying additional test timeout in seconds')
1797
1798     parser.add_option('', '--db',
1799                       dest='db_url',
1800                       help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
1801
1802     parser.add_option('-l', '--log',
1803                       dest='log_file_name',
1804                       help='Log events to an external file (note: not all console entries may be visible in the log file)')
1805
1806     parser.add_option('', '--report-html',
1807                       dest='report_html_file_name',
1808                       help='You can log test suite results in the form of an HTML report')
1809
1810     parser.add_option('', '--report-junit',
1811                       dest='report_junit_file_name',
1812                       help='You can log test suite results in the form of a JUnit-compliant XML report')
1813
1814     parser.add_option("", "--report-build",
1815                       dest="report_build_file_name",
1816                       help="Output the build results to an html file")
1817
1818     parser.add_option('', '--verbose-skipped',
1819                       dest='verbose_skipped_tests',
1820                       default=False,
1821                       action="store_true",
1822                       help='Prints some extra information about skipped tests')
1823
1824     parser.add_option('-V', '--verbose-test-result',
1825                       dest='verbose_test_result_only',
1826                       default=False,
1827                       action="store_true",
1828                       help='Prints test serial output')
1829
1830     parser.add_option('-v', '--verbose',
1831                       dest='verbose',
1832                       default=False,
1833                       action="store_true",
1834                       help='Verbose mode (prints some extra information)')
1835
1836     parser.add_option('', '--version',
1837                       dest='version',
1838                       default=False,
1839                       action="store_true",
1840                       help='Prints script version and exits')
1841     return parser
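# Illustrative usage sketch (not part of the original module): how a CLI
# script would typically consume this parser.
#
#   parser = get_default_test_options_parser()
#   (opts, args) = parser.parse_args()
#   if opts.verbose:
#       print "Verbose mode enabled"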