import os
import rand
import term
import v.util.diff
import v.util.vtest
import time
import runtime
import benchmark

const skip_files = [
	'non_existing.vv', // minimize commit diff churn, do not remove
]

const skip_on_cstrict = [
	'vlib/v/checker/tests/missing_c_lib_header_1.vv',
	'vlib/v/checker/tests/missing_c_lib_header_with_explanation_2.vv',
]

const skip_on_ubuntu_musl = [
	'vlib/v/checker/tests/vweb_tmpl_used_var.vv',
	'vlib/v/checker/tests/vweb_routing_checks.vv',
	'vlib/v/tests/skip_unused/gg_code.vv',
]

const skip_on_ci_musl = [
	'vlib/v/tests/skip_unused/gg_code.vv',
]

const vexe = os.getenv('VEXE')

const turn_off_vcolors = os.setenv('VCOLORS', 'never', true)

const show_cmd = os.getenv('VTEST_SHOW_CMD') != ''

// This is needed, because some of the .vv files are tests that need stable
// output, which can then be compared against their .out files:
const turn_on_normal_test_runner = os.setenv('VTEST_RUNNER', 'normal', true)
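
// Setting VAUTOFIX to a non empty value makes the test runner create any
// missing .out files, and overwrite mismatching ones with the new output,
// instead of just reporting the failures.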
const should_autofix = os.getenv('VAUTOFIX') != ''

const github_job = os.getenv('GITHUB_JOB')

const v_ci_ubuntu_musl = os.getenv('V_CI_UBUNTU_MUSL').len > 0

const v_ci_musl = os.getenv('V_CI_MUSL').len > 0

const v_ci_cstrict = os.getenv('V_CI_CSTRICT').len > 0
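
// TaskDescription describes a single .vv test: how it should be invoked
// (vexe, environment variables, options), and the state filled in by its
// execution (expected vs found output, the final command, duration, status).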
struct TaskDescription {
	vexe             string
	evars            string
	dir              string
	voptions         string
	result_extension string
	path             string
mut:
	is_error          bool
	is_skipped        bool
	is_module         bool
	expected          string
	expected_out_path string
	found___          string
	took              time.Duration
	cli_cmd           string
}
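
// Tasks groups the TaskDescription items of one batch, so that they can be
// processed together by a pool of worker threads in `run`.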
struct Tasks {
	vexe          string
	parallel_jobs int // 0 means: use VJOBS; any other value overrides it
	label         string
mut:
	show_cmd bool
	all      []TaskDescription
}
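
// test_all discovers the .vv files in the various compiler test folders,
// then runs each of them with the appropriate options, comparing their
// actual output against the corresponding .out files.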
fn test_all() {
	vroot := os.dir(vexe)
	os.chdir(vroot) or {}
	checker_dir := 'vlib/v/checker/tests'
	checker_with_check_option_dir := 'vlib/v/checker/tests/with_check_option'
	parser_dir := 'vlib/v/parser/tests'
	scanner_dir := 'vlib/v/scanner/tests'
	module_dir := '${checker_dir}/modules'
	global_dir := '${checker_dir}/globals'
	global_run_dir := '${checker_dir}/globals_run'
	run_dir := '${checker_dir}/run'
	skip_unused_dir := 'vlib/v/tests/skip_unused'
	//
	checker_tests := get_tests_in_dir(checker_dir, false).filter(!it.contains('with_check_option'))
	parser_tests := get_tests_in_dir(parser_dir, false)
	scanner_tests := get_tests_in_dir(scanner_dir, false)
	global_tests := get_tests_in_dir(global_dir, false)
	global_run_tests := get_tests_in_dir(global_run_dir, false)
	module_tests := get_tests_in_dir(module_dir, true)
	run_tests := get_tests_in_dir(run_dir, false)
	skip_unused_dir_tests := get_tests_in_dir(skip_unused_dir, false)
	checker_with_check_option_tests := get_tests_in_dir(checker_with_check_option_dir,
		false)
	mut tasks := Tasks{
		vexe: vexe
		label: 'all tests'
	}
	tasks.add('', parser_dir, '', '.out', parser_tests, false)
	tasks.add('', checker_dir, '', '.out', checker_tests, false)
	tasks.add('', scanner_dir, '', '.out', scanner_tests, false)
	tasks.add('', checker_dir, '-enable-globals run', '.run.out', ['globals_error.vv'],
		false)
	tasks.add('', global_run_dir, '-enable-globals run', '.run.out', global_run_tests,
		false)
	tasks.add('', global_dir, '-enable-globals', '.out', global_tests, false)
	tasks.add('', module_dir, '-prod run', '.out', module_tests, true)
	tasks.add('', run_dir, 'run', '.run.out', run_tests, false)
	tasks.add('', checker_with_check_option_dir, '-check', '.out', checker_with_check_option_tests,
		false)
	tasks.run()
	//
	if os.user_os() == 'linux' {
		mut skip_unused_tasks := Tasks{
			vexe: vexe
			parallel_jobs: 1
			label: '-skip-unused tests'
		}
		skip_unused_tasks.add('', skip_unused_dir, 'run', '.run.out', skip_unused_dir_tests,
			false)
		skip_unused_tasks.add('', skip_unused_dir, '-d no_backtrace -skip-unused run',
			'.skip_unused.run.out', skip_unused_dir_tests, false)
		skip_unused_tasks.run()
	}
	//
	if github_job == 'ubuntu-tcc' {
		// This is done with tcc only, because the error output is compiler specific.
		// Note: the tasks should be run serially, since they depend on
		// setting and using environment variables.
		mut cte_tasks := Tasks{
			vexe: vexe
			parallel_jobs: 1
			label: 'comptime env tests'
		}
		cte_dir := '${checker_dir}/comptime_env'
		files := get_tests_in_dir(cte_dir, false)
		cte_tasks.add('', cte_dir, '-no-retry-compilation run', '.run.out', files, false)
		cte_tasks.add_evars('VAR=/usr/include', '', cte_dir, '-no-retry-compilation run',
			'.var.run.out', ['using_comptime_env.vv'], false)
		cte_tasks.add_evars('VAR=/opt/invalid/path', '', cte_dir, '-no-retry-compilation run',
			'.var_invalid.run.out', ['using_comptime_env.vv'], false)
		cte_tasks.run()
	}
	mut ct_tasks := Tasks{
		vexe: vexe
		parallel_jobs: 1
		label: 'comptime define tests'
	}
	ct_tasks.add_checked_run('-d mysymbol run', '.mysymbol.run.out', [
		'custom_comptime_define_error.vv',
	])
	ct_tasks.add_checked_run('-d mydebug run', '.mydebug.run.out', [
		'custom_comptime_define_if_flag.vv',
	])
	ct_tasks.add_checked_run('-d nodebug run', '.nodebug.run.out', [
		'custom_comptime_define_if_flag.vv',
	])
	ct_tasks.add_checked_run('run', '.run.out', ['custom_comptime_define_if_debug.vv'])
	ct_tasks.add_checked_run('-g run', '.g.run.out', [
		'custom_comptime_define_if_debug.vv',
	])
	ct_tasks.add_checked_run('-cg run', '.cg.run.out', [
		'custom_comptime_define_if_debug.vv',
	])
	ct_tasks.add_checked_run('-d debug run', '.debug.run.out', [
		'custom_comptime_define_if_debug.vv',
	])
	ct_tasks.add_checked_run('-d debug -d bar run', '.debug.bar.run.out', [
		'custom_comptime_define_if_debug.vv',
	])
	ct_tasks.run()
}
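
// add_checked_run adds tests from the checker tests folder, that will be
// both compiled and run with the given voptions, and whose output will be
// compared against the files with the given result_extension.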
fn (mut tasks Tasks) add_checked_run(voptions string, result_extension string, tests []string) {
	checker_dir := 'vlib/v/checker/tests'
	tasks.add('', checker_dir, voptions, result_extension, tests, false)
}
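
// add registers the given tests, without any extra environment variables.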
fn (mut tasks Tasks) add(custom_vexe string, dir string, voptions string, result_extension string, tests []string, is_module bool) {
	tasks.add_evars('', custom_vexe, dir, voptions, result_extension, tests, is_module)
}
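
// add_evars registers one task per test file. The evars string is prepended
// verbatim to the final shell command (see TaskDescription.execute), so a
// hypothetical call like:
//   tasks.add_evars('VAR=value', '', dir, 'run', '.var.run.out', ['x.vv'], false)
// would produce a command like `VAR=value /path/to/v run dir/x.vv`, and
// compare its output against dir/x.var.run.out .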
fn (mut tasks Tasks) add_evars(evars string, custom_vexe string, dir string, voptions string, result_extension string, tests []string, is_module bool) {
	paths := vtest.filter_vtest_only(tests, basepath: dir)
	for path in paths {
		tasks.all << TaskDescription{
			evars: evars
			vexe: if custom_vexe != '' { custom_vexe } else { tasks.vexe }
			dir: dir
			voptions: voptions
			result_extension: result_extension
			path: path
			is_module: is_module
		}
	}
}

fn bstep_message(mut bench benchmark.Benchmark, label string, msg string, sduration time.Duration) string {
	return bench.step_message_with_label_and_duration(label, msg, sduration)
}

// process an array of tasks in parallel, using no more than vjobs worker threads
fn (mut tasks Tasks) run() {
	if tasks.all.len == 0 {
		return
	}
	tasks.show_cmd = show_cmd
	vjobs := if tasks.parallel_jobs > 0 { tasks.parallel_jobs } else { runtime.nr_jobs() }
	mut bench := benchmark.new_benchmark()
	bench.set_total_expected_steps(tasks.all.len)
	work := chan TaskDescription{cap: tasks.all.len}
	results := chan TaskDescription{cap: tasks.all.len}
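	// Note: both channels are buffered with capacity for all the tasks,
	// so the feeding loop below can enqueue everything without blocking.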
	mut m_skip_files := skip_files.clone()
	if v_ci_ubuntu_musl {
		m_skip_files << skip_on_ubuntu_musl
	}
	if v_ci_musl {
		m_skip_files << skip_on_ci_musl
	}
	if v_ci_cstrict {
		m_skip_files << skip_on_cstrict
	}
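	// Compiling this test runner itself with `-d noskip` empties the skip
	// list, so that all tests are run, even the normally skipped ones.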
	$if noskip ? {
		m_skip_files = []
	}
	$if tinyc {
		// Note: tcc does not support __has_include, so the detection mechanism
		// used for the other compilers does not work. It still provides a cleaner
		// error message than a generic C error, but without the explanation.
		m_skip_files << 'vlib/v/checker/tests/missing_c_lib_header_1.vv'
		m_skip_files << 'vlib/v/checker/tests/missing_c_lib_header_with_explanation_2.vv'
	}
	$if msvc {
		m_skip_files << 'vlib/v/checker/tests/asm_alias_does_not_exist.vv'
		m_skip_files << 'vlib/v/checker/tests/asm_immutable_err.vv'
		// TODO: investigate why MSVC regressed
		m_skip_files << 'vlib/v/checker/tests/missing_c_lib_header_1.vv'
		m_skip_files << 'vlib/v/checker/tests/missing_c_lib_header_with_explanation_2.vv'
	}
	$if windows {
		m_skip_files << 'vlib/v/checker/tests/modules/deprecated_module'
	}
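	// Feed all the tasks to the workers. Skipped tasks are enqueued too,
	// so that they can still be counted and reported by the loop below.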
	for i in 0 .. tasks.all.len {
		if tasks.all[i].path in m_skip_files {
			tasks.all[i].is_skipped = true
		}
		work <- tasks.all[i]
	}
	work.close()
	for _ in 0 .. vjobs {
		spawn work_processor(work, results)
	}
	if github_job == '' {
		println('')
	}
	mut line_can_be_erased := true
	mut total_errors := 0
	for _ in 0 .. tasks.all.len {
		task := <-results
		bench.step()
		if task.is_skipped {
			bench.skip()
			eprintln(bstep_message(mut bench, benchmark.b_skip, task.path, task.took))
			line_can_be_erased = false
			continue
		}
		if task.is_error {
			total_errors++
			bench.fail()
			eprintln(bstep_message(mut bench, benchmark.b_fail, task.path, task.took))
			println('============')
			println('failed cmd: ${task.cli_cmd}')
			println('expected_out_path: ${task.expected_out_path}')
			println('============')
			println('expected:')
			println(task.expected)
			println('============')
			println('found:')
			println(task.found___)
			println('============\n')
			diff_content(task.expected, task.found___)
			line_can_be_erased = false
		} else {
			bench.ok()
			assert true
			if tasks.show_cmd {
				eprintln(bstep_message(mut bench, benchmark.b_ok, '${task.cli_cmd}', task.took))
			} else {
				if github_job == '' {
					// local mode:
					if line_can_be_erased {
						term.clear_previous_line()
					}
					println(bstep_message(mut bench, benchmark.b_ok, task.path, task.took))
				}
			}
			line_can_be_erased = true
		}
	}
	bench.stop()
	eprintln(term.h_divider('-'))
	eprintln(bench.total_message(tasks.label))
	if total_errors != 0 {
		exit(1)
	}
}

// a single worker thread spends its time getting work from the `work` channel,
// processing the task, and then putting the finished task in the `results` channel
fn work_processor(work chan TaskDescription, results chan TaskDescription) {
	for {
		mut task := <-work or { break }
		sw := time.new_stopwatch()
		task.execute()
		task.took = sw.elapsed()
		results <- task
	}
}

// execute does the actual task processing. Note: it produces no output at all.
fn (mut task TaskDescription) execute() {
	if task.is_skipped {
		return
	}
	program := task.path
	cmd_prefix := if task.evars.len > 0 { '${task.evars} ' } else { '' }
	cli_cmd := '${cmd_prefix}${os.quoted_path(task.vexe)} ${task.voptions} ${os.quoted_path(program)}'
	res := os.execute(cli_cmd)
	expected_out_path := program.replace('.vv', '') + task.result_extension
	task.expected_out_path = expected_out_path
	task.cli_cmd = cli_cmd
	if should_autofix && !os.exists(expected_out_path) {
		os.create(expected_out_path) or { panic(err) }
	}
	expected := os.read_file(expected_out_path) or { panic(err) }
	task.expected = clean_line_endings(expected)
	task.found___ = clean_line_endings(res.output)
	$if windows {
		if task.is_module {
			task.found___ = task.found___.replace_once('\\', '/')
		}
	}
	if task.expected != task.found___ {
		task.is_error = true
		if should_autofix {
			os.write_file(expected_out_path, res.output) or { panic(err) }
		}
	}
}
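
// clean_line_endings normalises the compiler output, trimming the
// surrounding whitespace, removing spaces before newlines, and converting
// \r\n to just \n, so that comparisons are stable across OSes and shells.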
fn clean_line_endings(s string) string {
	mut res := s.trim_space()
	res = res.replace(' \n', '\n')
	res = res.replace(' \r\n', '\n')
	res = res.replace('\r\n', '\n')
	res = res.trim('\n')
	return res
}
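
// diff_content prints a colored diff between the expected and the found
// output, when a working external diff tool is available on the system.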
fn diff_content(expected string, found string) {
	diff_cmd := diff.find_working_diff_command() or { return }
	println(term.bold(term.yellow('diff: ')))
	println(diff.color_compare_strings(diff_cmd, rand.ulid(), expected, found))
	println('============\n')
}
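
// get_tests_in_dir returns a sorted list of the test entries in `dir`:
// either the `.vv` files in it, or, for module tests, everything that is
// not an `.out` file (i.e. the module folders themselves).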
fn get_tests_in_dir(dir string, is_module bool) []string {
	files := os.ls(dir) or { panic(err) }
	mut tests := files.clone()
	if !is_module {
		tests = files.filter(it.ends_with('.vv'))
	} else {
		tests = files.filter(!it.ends_with('.out'))
	}
	tests.sort()
	return tests
}