fife-parpg: test_fife.py @ changeset 0:4a0efb7baf70 (initial commit, no parent)

description: Datasets becomes the new trunk and retires after that :-)
author:      mvbarracuda@33b003aa-7bff-0310-803a-e67f0ece8222
date:        Sun, 29 Jun 2008 18:44:17 +0000
children:    64738befdf3b
#!/usr/bin/env python
"""Test runner for FIFE: runs the core test programs, the SWIG and extension
test modules, and the code analyzers, interactively or in batch mode."""
import os, re, sys, optparse, unittest

def genpath(somepath):
    """Convert a '/'-separated path into one using the native path separator."""
    return os.path.sep.join(somepath.split('/'))

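# Note: genpath('tests/core_tests') stays 'tests/core_tests' on POSIX systems
# but becomes 'tests\\core_tests' on Windows, where os.path.sep is '\\'.
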
def print_header(text):
    print '\n'
    print 80 * '='
    print text
    print 80 * '-'

def resolve_test_progs(sconscript_filename):
    """ Get the names of all test programs by parsing the SConscript file """
    reprg = re.compile(r"""^env\.Program\(["'](.*?)['"]""")
    progs = []
    for line in open(sconscript_filename):
        m = reprg.match(line.strip())
        if m:
            progs.append(m.group(1))
    return progs

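# The regex above matches SConscript lines such as env.Program("test_foo", ...)
# and captures the target name ("test_foo" is illustrative, not a real target).
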
def resolve_test_modules(directory):
    pythonfilenames = [p for p in os.listdir(directory) if len(p) > 3 and p.endswith('.py')]
    modname = directory.replace(os.path.sep, '.') + '.'
    modules = []
    skipped_filenames = ('test_all.py',)
    for p in pythonfilenames:
        skip = False
        for s in skipped_filenames:
            if s in p:
                skip = True
        if p[0] == '_':
            skip = True
        if not skip:
            modules.append(modname + p[:-3])
    return modules

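# For instance, a file test_audio.py under tests/swig_tests resolves to the
# module name 'tests.swig_tests.test_audio' (the file name is illustrative).
# Files starting with '_' and test_all.py itself are skipped.
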
def run_core_tests(progs):
    prevdir = os.getcwd()
    os.chdir(genpath('tests/core_tests'))

    errors, failures = [], []
    for prog in progs:
        print '\n===== Running %s =====' % prog
        # os.system returns the program's exit status; non-zero means the
        # test program failed
        if os.system(os.sep.join(('.', prog))):
            errors.append(prog)
    os.chdir(prevdir)
    return errors, failures

def get_dynamic_imports(modules):
    imported = []
    for module in modules:
        # __import__('a.b.c') returns the top-level package 'a', so walk the
        # remaining attribute path to reach the submodule itself
        m = __import__(module)
        for part in module.split('.')[1:]:
            m = getattr(m, part)
        imported.append(m)
    return imported

def run_test_modules(modules):
    imported = get_dynamic_imports(modules)
    suites = []
    for m in imported:
        try:
            for c in m.__dict__['TEST_CLASSES']:
                suites.append(unittest.TestLoader().loadTestsFromTestCase(c))
        except (AttributeError, KeyError):
            pass
    mastersuite = unittest.TestSuite(suites)
    runner = unittest.TextTestRunner(verbosity=2)
    result = runner.run(mastersuite)
    return [e[1] for e in result.errors], [f[1] for f in result.failures]

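# Test modules are expected to define TEST_CLASSES, a sequence of their
# unittest.TestCase subclasses. A minimal sketch (names illustrative):
#
#     import unittest
#
#     class TestAudio(unittest.TestCase):
#         def test_something(self):
#             self.assertTrue(True)
#
#     TEST_CLASSES = [TestAudio]
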
def run_analyzers(modules):
    errors = []
    imported = get_dynamic_imports(modules)
    for m in imported:
        analyzefn = m.__dict__.get('_ANALYZE_FN_')
        if analyzefn:
            error = analyzefn()
            if error:
                errors.append(error)
    return errors

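# Analyzer modules opt in by defining _ANALYZE_FN_, a callable that is
# expected to return an error message on failure and a falsy value otherwise.
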
def run_all(tests):
    def print_errors(txt, errs):
        if errs:
            print txt + ':'
            for msg in errs:
                print ' ' + msg

    core_errors, core_failures = run_core_tests(tests['core'])
    swig_errors, swig_failures = run_test_modules(tests['swig'])
    ext_errors, ext_failures = run_test_modules(tests['ext'])
    analyzer_errors = run_analyzers(tests['analyzer'])

    print 80 * '='
    errorsfound = False

    if core_errors or core_failures:
        print_errors('Errors in core tests', core_errors)
        print_errors('Failures in core tests', core_failures)
        errorsfound = True
    else:
        print 'No Core errors found'

    if swig_errors or swig_failures:
        print_errors('Errors in SWIG tests', swig_errors)
        print_errors('Failures in SWIG tests', swig_failures)
        errorsfound = True
    else:
        print 'No SWIG errors found'

    if ext_errors or ext_failures:
        print_errors('Errors in extension tests', ext_errors)
        print_errors('Failures in extension tests', ext_failures)
        errorsfound = True
    else:
        print 'No Extension errors found'

    if analyzer_errors:
        print_errors('Errors in Analyzers', analyzer_errors)
        errorsfound = True
    else:
        print 'No Analyzer errors found'

    print 80 * '='
    if errorsfound:
        print 'Looks like there are some errors in the code, svn commit is probably not a good idea yet...'
    else:
        print 'All tests ran successfully!'
    print ''

def quit(dummy):
    sys.exit(0)

def run(automatic, selected_cases):
    index = 0
    tests = {}

    core_tests = resolve_test_progs(genpath('tests/core_tests/SConscript'))
    for t in core_tests:
        tests[index] = ('Core tests', t, [t], run_core_tests)
        index += 1
    tests[index] = ('Core tests', 'all', core_tests, run_core_tests)
    index += 1

    swig_tests = resolve_test_modules(genpath('tests/swig_tests'))
    for t in swig_tests:
        tests[index] = ('SWIG tests', t, [t], run_test_modules)
        index += 1
    tests[index] = ('SWIG tests', 'all', swig_tests, run_test_modules)
    index += 1

    extension_tests = resolve_test_modules(genpath('tests/extension_tests'))
    for t in extension_tests:
        tests[index] = ('Extension tests', t, [t], run_test_modules)
        index += 1
    tests[index] = ('Extension tests', 'all', extension_tests, run_test_modules)
    index += 1

    analyzers = resolve_test_modules(genpath('tests/analyzers'))
    for t in analyzers:
        tests[index] = ('Analyzers', t, [t], run_analyzers)
        index += 1

    alltests = {'core': core_tests, 'swig': swig_tests, 'ext': extension_tests, 'analyzer': analyzers}
    tests[index] = ('Other', 'Run all tests', alltests, run_all)
    tests[index+1] = ('Other', 'Cancel and quit', None, quit)

    if not automatic and not selected_cases:
        selection = None
        while True:
            print 'Select test module to run:'
            prevheader = ''
            for ind in sorted(tests.keys()):
                header, name, params, fn = tests[ind]
                if header != prevheader:
                    print header
                    prevheader = header
                print ' %d) %s' % (ind, name)
            selection = raw_input('-> : ')

            try:
                selection = int(selection)
                if (selection < 0) or (selection > max(tests.keys())):
                    raise ValueError
                break
            except ValueError:
                print 'Please enter a number between 0 and %d\n' % max(tests.keys())
                continue
        header, name, params, fn = tests[selection]
        fn(params)
    elif selected_cases:
        for case in selected_cases:
            try:
                caseid = int(case)
                if (caseid < 0) or (caseid > max(tests.keys())):
                    raise ValueError
                header, name, params, fn = tests[caseid]
                fn(params)
            except ValueError:
                print 'No test case with id %s found' % case
    else:
        run_all(alltests)

def main():
    usage = 'usage: %prog [options] [args]\n' + \
            'Runs programs that test FIFE functionality. It is recommended that you\n' + \
            'always run these tests before an svn commit.\n' + \
            'You can give a list of test ids as arguments to the script. This is\n' + \
            'useful when running the same tests over and over again with small\n' + \
            'changes. The available test ids can be seen in the interactive menu\n' + \
            '(run the script without any parameters).'
    parser = optparse.OptionParser(usage)
    parser.add_option("-a", "--automatic",
        action="store_true", dest="automatic", default=False,
        help="If given, runs all the tests automatically")
    options, args = parser.parse_args()
    run(options.automatic, args)


if __name__ == '__main__':
    main()
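
# Usage examples:
#   python test_fife.py        # interactive menu
#   python test_fife.py -a     # run all tests without prompting
#   python test_fife.py 0 3    # run the test cases with ids 0 and 3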