#!/bin/sh
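
#
# awlsim unit test runner.
# Discovers .awl, .awlpro, .sh and .py test cases below the tests directory
# and runs them with one or more Python interpreters. Run with -h for options.
#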

# basedir is the root of the test directory in the package
basedir="$(dirname "$0")"
[ "$(echo "$basedir" | cut -c1)" = '/' ] || basedir="$PWD/$basedir"

# rootdir is the root of the package
rootdir="$basedir/.."


failfile_write()
{
	(
		flock -x 9 || die "Failed to take lock"
		echo "$*" >> "$test_fail_file"
	) 9< "$test_fail_file"
}

infomsg()
{
	# Print an informational message unless quiet mode is enabled.
	# Always return success, so callers are not affected by quiet mode.
	[ -n "$AWLSIM_TEST_QUIET" ] || echo "$@"
}

warnmsg()
{
	echo "WARNING: $*" >&2
}

errormsg()
{
	echo "$@" >&2
}

die()
{
	if [ -n "$*" ]; then
		errormsg "$*"
		# We might be in a sub-job. So write to fail-file.
		failfile_write "$*"
	fi
	exit 1
}

# Create a temporary file below $tmp_dir. $1=name prefix, $2=optional subdirectory
maketemp()
{
	local prefix="$1"
	local subdir="$2"

	if [ -z "$subdir" ]; then
		local subdir="."
	else
		mkdir -p "$tmp_dir/$subdir"
	fi
	mktemp --tmpdir="$tmp_dir" "${subdir}/awlsim-test-${prefix}.XXXXXX"
}
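
# Usage sketch (illustrative; "log", "data" and "somedir" are example names):
#   logfile="$(maketemp log)"            # unique file directly below $tmp_dir
#   datafile="$(maketemp data somedir)"  # unique file below $tmp_dir/somedir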

# $1=message
test_failed()
{
	errormsg "=== TEST FAILED ==="
	die "$@"
}

cleanup()
{
	wait

	rm -rf "$tmp_dir" >/dev/null 2>&1
}

cleanup_and_exit()
{
	cleanup
	exit 1
}

# Get a configuration option.
# $1=configured_file
# $2=option_name
# ($3=default_value)
get_conf()
{
	local configured_file="$1"
	local option_name="$2"
	local default_value="$3"

	local conf="${configured_file}.conf"
	local val="$default_value"
	if [ -r "$conf" ]; then
		local regex="^${option_name}="
		if grep -qEe "$regex" "$conf"; then
			local val="$(grep -Ee "$regex" "$conf" | cut -d'=' -f2-)"
		fi
	fi
	printf '%s' "$val"
}
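
# Test case options live in a "<testfile>.conf" file next to the test file,
# one "option=value" per line. An illustrative (not real) example for a test
# named example.awl would be an example.awl.conf containing:
#   optimizer_runs=all
#   tries=1
#   loglevel=2
#   exit_code=0
#   cycle_limit=60
#   max_runtime=-1
#   accus=2
#   env=FOO=bar:BAZ=42
#   PYTHONPATH=submodules/example
#   disabled=1
# All of these keys are read via get_conf elsewhere in this script; missing
# keys simply fall back to the default value passed to get_conf.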

# Allocate a new port number.
get_port()
{
	(
		flock -x 8 || die "Failed to take port lock"

		local port="$(cat "$port_alloc_file")"
		local next="$(expr "$port" + 1)"
		echo "$next" > "$port_alloc_file" ||\
			die "Failed to update port allocation file"

		echo -n "$port"
	) 8> "${port_alloc_file}.lock"
}
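
# Usage sketch (illustrative): a test that needs a TCP port can allocate a
# unique one so that parallel jobs do not collide, e.g.
#   port="$(get_port)"
# and then pass "$port" to whatever server/client it starts.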

# Returns true (0) if more than one parallel job is configured.
is_parallel_run()
{
	[ $opt_jobs -gt 1 ]
}

# Wait until there is at least one free job slot.
wait_for_free_job_slot()
{
	while true; do
		jobs -l > "$jobs_tmp_file" # can't use pipe on dash
		[ "$(cat "$jobs_tmp_file" | wc -l)" -lt $opt_jobs ] && break
		# Too many jobs. Waiting...
		sleep 0.1
	done
}

# $1 is the PID of the job to wait for.
wait_for_job_pid()
{
	local jobpid="$1"

	while true; do
		jobs -l > "$jobs_tmp_file" # can't use pipe on dash
		cat "$jobs_tmp_file" | tr -d '+-' |\
			sed -e 's/[[:blank:]]\+/\t/g' | cut -f2 |\
			grep -qe '^'"$jobpid"'$' || break
		# Job is still running...
		sleep 0.1
	done
}

# Returns true (0) if at least one background job failed.
check_job_failure()
{
	is_parallel_run &&\
	[ -e "$test_fail_file" ] &&\
	[ "0" != "$(du -s "$test_fail_file" | cut -f1)" ]
}

wait_for_all_background_jobs()
{
	is_parallel_run || return

	infomsg "Waiting for background jobs..."
	wait
	# Print the fail information.
	if check_job_failure; then
		errormsg
		errormsg "===== FAILURES in parallel run: ====="
		cat "$test_fail_file" >&2
		errormsg "====================================="
		global_retval=1
	fi
}

# $1=interpreter
# Returns version on stdout as:  MAJOR MINOR PATCHLEVEL
get_interpreter_version()
{
	local interpreter="$1"

	[ "$interpreter" = "cython3" ] && local interpreter=python3

	"$interpreter" -c 'import sys; print("%d %d %d" % sys.version_info[0:3]);' 2>/dev/null
}
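
# Usage sketch (illustrative output; the actual numbers depend on the system):
#   ver="$(get_interpreter_version python3)"   # e.g. "3 11 2"
#   major="$(echo "$ver" | cut -d' ' -f1)"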

# Check if an interpreter is able to run GUI code.
# $1=interpreter
interpreter_is_gui_compat()
{
	local interpreter="$1"

	[ $opt_nogui -eq 0 ] &&\
	[ "$interpreter" = "python3" -o \
	  "$interpreter" = "cython3" ]
}

# $1=program_name
have_prog()
{
	local program="$1"

	which "$program" >/dev/null 2>&1
}

# $1=executable_name
find_executable()
{
	local executable_name="$1"

	local executable_path="$(which "$executable_name")"
	[ -n "$executable_path" ] ||\
		die "$executable_name executable not found."\
		    "Please install $executable_name."
	RET="$executable_path"
}

# Check DOS file encoding.
# $1=file
check_dos_text_encoding()
{
	local file="$1"

	if [ x"$(du -b "$file" | cut -f1)" != x"0" ]; then
		# Check CR/LF
		file -L "$file" | grep -qe 'CRLF line terminators' || {
			die "ERROR: '$file' is not in DOS format."
		}
		# Check file encoding
		file -L "$file" | grep -qEe '(ISO-8859 text)|(ASCII text)' || {
			die "ERROR: '$file' invalid file encoding."
		}
	fi
}

# $1=interpreter $2=tested_file [$3=test_name]
setup_test_environment()
{
	local interpreter="$1"
	local tested_file="$2"
	local test_name="$3"

	[ -z "$test_name" ] && local test_name="$tested_file"
	local test_name="$(realpath -m --no-symlinks --relative-base="$rootdir" "$test_name" | tr '/\\' _)"

	# Check if we want to run on Cython3 and set up the environment.
	local use_cython=0
	if [ "$interpreter" = "cython3" ] ||\
	   [ "$interpreter" = "python3" -a "$AWLSIM_CYTHON" != "" ]; then
		# We want to run the test using Cython3

		local use_cython=3

		for i in "$rootdir"/build/lib.linux-*-3.*; do
			export PYTHONPATH="$i"
			break
		done
		# Enforce cython module usage
		export AWLSIM_CYTHON=2
		# The actual interpreter is Python
		local interpreter=python3

	elif [ "$interpreter" = "micropython" ]; then
		# We want to run the test using Micropython

		local interpreter="$rootdir/maintenance/micropython-wrapper.sh"

	else
		# Not Cython
		export PYTHONPATH=
		export AWLSIM_CYTHON=
	fi

	# Extra environment variables: the test's 'env' conf option holds
	# colon-separated NAME=VALUE pairs that get exported here.
	RAW_EXTRA_ENV="$(get_conf "$tested_file" env)"
	for env in $(printf '%s' "$RAW_EXTRA_ENV" | tr ':' ' '); do
		eval export "$env"
	done

	# Get extra PYTHONPATH from test case config file.
	local conf_pythonpath=
	if [ -n "$tested_file" ]; then
		local raw_conf_pythonpath="$(get_conf "$tested_file" PYTHONPATH)"
		local onepath=
		for onepath in $(printf '%s' "$raw_conf_pythonpath" | tr ':' ' '); do
			if [ -n "$conf_pythonpath" ]; then
				local conf_pythonpath="$conf_pythonpath:"
			fi
			local conf_pythonpath="${conf_pythonpath}$(realpath -m --no-symlinks "$rootdir/$onepath")"
		done
	fi

	# Export PYTHONPATHs
	export PYTHONPATH="$PYTHONPATH:$EXTRA_PYTHONPATH:$conf_pythonpath"
	export JYTHONPATH="$JYTHONPATH:$EXTRA_PYTHONPATH:$conf_pythonpath"
	export IRONPYTHONPATH="$IRONPYTHONPATH:$EXTRA_PYTHONPATH:$conf_pythonpath"
	export MICROPYPATH="$MICROPYPATH:$EXTRA_PYTHONPATH:$conf_pythonpath"

	# Disable Python optimization so that assert statements are enabled.
	# Enable warnings.
	# Enable hash seed randomization.
	unset PYTHONSTARTUP
	unset PYTHONY2K
	unset PYTHONOPTIMIZE
	unset PYTHONDEBUG
	unset PYTHONDONTWRITEBYTECODE
	unset PYTHONINSPECT
	unset PYTHONIOENCODING
	unset PYTHONNOUSERSITE
	unset PYTHONUNBUFFERED
	unset PYTHONVERBOSE
	if [ $use_cython -eq 0 ]; then
		export PYTHONWARNINGS=once
	else
		export PYTHONWARNINGS=once,ignore::ImportWarning
	fi
	export PYTHONHASHSEED=random

	# Disable CPU affinity
	unset AWLSIM_AFFINITY

	# Set up coverage tracing
	if [ $coverage_enabled -eq 0 ]; then
		unset AWLSIM_COVERAGE
	else
		local coverage_data_file="$(maketemp "coverage_${test_name}" "$coverage_data_subdir")"
		rm "$coverage_data_file"

		export AWLSIM_COVERAGE="$coverage_data_file"
	fi

	RET="$interpreter"
}

cleanup_test_environment()
{
	export AWLSIM_CYTHON=

	export PYTHONPATH=
	export JYTHONPATH=
	export IRONPYTHONPATH=
	export MICROPYPATH=

	export EXTRA_PYTHONPATH=

	# Clear all extra environment variables again
	for env in $(printf '%s' "$RAW_EXTRA_ENV" | tr ':' ' '); do
		eval export "$(printf '%s' "$env" | cut -d'=' -f1)"=
	done
}

# $1=interpreter $2=awl_file ($3 and following: additional options passed to awlsim-test)
run_awl_test()
{
	local interpreter="$1"
	local awl="$2"
	shift; shift

	# By default run once with all optimizers enabled.
	local optimizer_runs="$(get_conf "$awl" optimizer_runs all)"

	local first_opti=1
	for optimizers in $optimizer_runs; do
		[ $first_opti -eq 0 ] && infomsg -n " / "
		local first_opti=0

		local tries="$(get_conf "$awl" tries 1)"
		[ $tries -lt 1 ] && local tries=1
		local first_try=1

		local ok=0
		local exit_code=-1
		local expected_exit_code=-2
		while [ $tries -gt 0 -a $ok -eq 0 ]; do
			local tries="$(expr "$tries" - 1)"

			(
				[ $first_try -ne 0 ] && adjust_niceness "$($SHELL -c 'echo $PPID')"
				setup_test_environment "$interpreter" "$awl"
				local actual_interpreter="$RET"

				local loglevel="$(get_conf "$awl" loglevel "$opt_loglevel")"
				local expected_exit_code="$(get_conf "$awl" exit_code 0)"
				[ $expected_exit_code -eq 0 ] || local loglevel=0
				local cycle_limit="$(get_conf "$awl" cycle_limit 60)"
				local max_runtime="$(get_conf "$awl" max_runtime -1)"
				local accus="$(get_conf "$awl" accus)"
				if [ "$accus" = "2" ]; then
					local accus=--twoaccu
				elif [ "$accus" = "4" ]; then
					local accus=--fouraccu
				elif [ -n "$accus" ]; then
					cleanup_test_environment
					die "Invalid 'accus' value in .conf"
				fi
				local dump_opt=
				[ $loglevel -ge 3 ] && local dump_opt="--no-cpu-dump"

				"$actual_interpreter" "$rootdir/awlsim-test" \
					--loglevel $loglevel \
					--extended-insns \
					--hardware debug:inputAddressBase=7:outputAddressBase=8:dummyParam=True \
					--cycle-limit "$cycle_limit" \
					--max-runtime "$max_runtime" \
					--optimizers "$optimizers" \
					$accus \
					$dump_opt \
					"$@" \
					"$awl"
				local exit_code=$?
				if [ $exit_code -ne $expected_exit_code ]; then
					# Test failed
					cleanup_test_environment
					if [ $tries -gt 0 ]; then
						infomsg "Test '$(basename "$awl")' FAILED, but retrying ($tries tries left)..."
						sleep 1
						die # Next try
					else
						test_failed "\nTest '$(basename "$awl")'   FAILED" \
							"\nInterpreter        = $interpreter" \
							"\nOptimizers         = $optimizers" \
							"\nActual exit code   = $exit_code" \
							"\nExpected exit code = $expected_exit_code"
					fi
				fi

				cleanup_test_environment
			) && local ok=1
			local first_try=0
		done
		if [ $ok -eq 0 ]; then
			die # Test failed
		fi
		if is_parallel_run; then
			infomsg "$(basename "$awl"): O=$optimizers -> OK"
		else
			infomsg -n "O=$optimizers -> OK"
		fi
	done
	is_parallel_run || infomsg
}

# $1=interpreter $2=sh_file
run_sh_test()
{
	local interpreter="$1"
	local sh_file="$2"
	shift; shift

	[ -x "$sh_file" ] && die "SH-file '$sh_file' must NOT be executable"

	[ "$(echo "$sh_file" | cut -c1)" = '/' ] || local sh_file="$(pwd)/$sh_file"

	# Run the test
	(
		# Source the test file
		. "$basedir/sh-test.defaults"
		. "$sh_file"

		adjust_niceness "$($SHELL -c 'echo $PPID')"
		setup_test_environment "$interpreter" "$sh_file"
		local interpreter="$RET"
		local test_dir="$(dirname "$sh_file")"
		local test_name="$(basename "$sh_file" .sh)"
		sh_test "$interpreter" "$test_dir" "$test_name"
		cleanup_test_environment
	)
	local result=$?

	[ $result -eq 0 ] || die "Test failed with error code $result"
	if is_parallel_run; then
		infomsg "$(basename "$sh_file"): OK"
	else
		infomsg "OK"
	fi
}

# $1=interpreter $2=test_file
run_pyunit_test()
{
	local interpreter="$1"
	local test_case="$2"
	shift; shift

	[ -z "$test_case" ] &&\
		die "Python unittest test case is missing"
	[ -d "$test_case" ] &&\
		die "Python unittest test case '$test_case' must not be a directory"
	[ -x "$test_case" ] &&\
		die "Python unittest test case '$test_case' must NOT be executable"

	# Resolve relative path
	[ "$(echo "$test_case" | cut -c1)" = '/' ] ||\
		local test_case="$(pwd)/$test_case"

	(
		# Add awlsim_tstlib.py to PYTHONPATH
		EXTRA_PYTHONPATH="$rootdir:$rootdir/tests:$EXTRA_PYTHONPATH"

		# Set up the Python environment
		adjust_niceness "$($SHELL -c 'echo $PPID')"
		local orig_interpreter="$interpreter"
		setup_test_environment "$interpreter" "$test_case"
		local interpreter="$RET"

		export PYTHONDONTWRITEBYTECODE=1

		if [ "$orig_interpreter" = "cython3" ] && ! [ -e "$(dirname "$test_case")/no_cython" ]; then
			# Get the relative test case path starting in 'tests' directory.
			local relpath="$(realpath -m --no-symlinks --relative-base="$rootdir/tests" "$test_case")"
			# Patch the module name to Cython name (append _cython).
			local patch_re='s/(tc[0-9][0-9][0-9]_[0-9a-zA-Z]*)/\1_cython/'
			local relpath_cython="$(printf "%s" "$relpath" | sed -Ee "$patch_re")"
			# Get the relative directory of the test case.
			local reldir_cython="$(dirname "$relpath_cython")"
			# Go to the unittest subdir to run the Cython unittest.
			cd "$rootdir/tests/build/"lib.*-3.*"/$reldir_cython" || die "Failed to cd to test directory."
		else
			# Go to the unittest subdir to run the Python unittest.
			cd "$(dirname "$test_case")" || die "Failed to cd to test directory."
		fi

		# Convert test name to module name (for python2)
		local test_case="$(basename "$test_case" .py)"

		# Run it.
		if [ -n "$AWLSIM_TEST_QUIET" ]; then
			"$interpreter" -m unittest "$test_case" >/dev/null 2>&1 ||\
				die "Python unittest test case '$(basename "$test_case")' failed."
		else
			"$interpreter" -m unittest "$test_case" ||\
				die "Python unittest test case '$(basename "$test_case")' failed."
		fi

		infomsg "$(basename "$test_case"): OK"

		cleanup_test_environment
	) || die "'$(basename "$test_case")' FAILED"
}

# $1=interpreter $2=testfile(.awl/.awlpro/.sh/.py) ($3 and following: additional options for awlsim-test or the test file)
run_test()
{
	local interpreter="$1"
	local testfile="$2"
	shift; shift

	# Don't run ourself
	[ "$(basename "$testfile")" = "run.sh" ] && return
	# Don't run artifacts that aren't actual test cases.
	[ "$(basename "$testfile")" = "awlsim_tstlib.py" ] && return
	[ "$(basename "$testfile")" = "setup-cython-tests.py" ] && return
	[ "$(basename "$testfile")" = "__init__.py" ] && return

	local disabled="$(get_conf "$testfile" disabled)"
	if [ -z "$disabled" ]; then

		# Print test headline
		local nl="-n"
		is_parallel_run && local nl=
		infomsg $nl "$(basename "$testfile") @ $(basename "$interpreter"): "

		local prev_dir="$(pwd)"
		cd "$rootdir" || die "cd to $rootdir failed"

		# Check the file type and run the tester
		if [ "$(echo -n "$testfile" | tail -c4)" = ".awl" ]; then
			check_dos_text_encoding "$testfile"
			run_awl_test "$interpreter" "$testfile" "$@"
		elif [ "$(echo -n "$testfile" | tail -c7)" = ".awlpro" ]; then
			run_awl_test "$interpreter" "$testfile" "$@"
		elif [ "$(echo -n "$testfile" | tail -c3)" = ".sh" ]; then
			run_sh_test "$interpreter" "$testfile" "$@"
		elif [ "$(echo -n "$testfile" | tail -c3)" = ".py" ]; then
			run_pyunit_test "$interpreter" "$testfile" "$@"
		else
			die "Test file type of '$testfile' not recognized"
		fi

		cd "$prev_dir" || die "cd to $prev_dir failed"
	else
		warnmsg "Skipping '$testfile' as it is disabled."
	fi
}

run_test_parallel()
{
	if is_parallel_run; then
		# Run tests in parallel.
		wait_for_free_job_slot
		run_test "$@" &
	else
		# Run tests one-by-one.
		run_test "$@"
	fi
}

# $1=interpreter, $2=directory
run_test_directory()
{
	local interpreter="$1"
	local directory="$2"

	[ "$(basename "$directory")" = "build" ] && return

	local prettydir="$(realpath -m --no-symlinks --relative-base="$rootdir" "$directory")/"

	infomsg ">>> entering $prettydir"
	# run .awlpro tests
	for entry in "$directory"/*; do
		[ -d "$entry" ] && continue
		[ "$(echo -n "$entry" | tail -c7)" = ".awlpro" ] || continue
		[ -e "$(dirname "$entry")/$(basename "$entry" .awlpro).sh" ] && continue

		run_test_parallel "$interpreter" "$entry"
		check_job_failure && return
	done
	# run .awl tests
	for entry in "$directory"/*; do
		[ -d "$entry" ] && continue
		[ "$(echo -n "$entry" | tail -c4)" = ".awl" ] || continue
		[ -e "$(dirname "$entry")/$(basename "$entry" .awl).awlpro" ] && continue
		[ -e "$(dirname "$entry")/$(basename "$entry" .awl).sh" ] && continue

		run_test_parallel "$interpreter" "$entry"
		check_job_failure && return
	done
	# run .sh tests
	for entry in "$directory"/*; do
		[ -d "$entry" ] && continue
		[ "$(echo -n "$entry" | tail -c3)" = ".sh" ] || continue
		run_test_parallel "$interpreter" "$entry"
		check_job_failure && return
	done
	# run .py unittest tests
	for entry in "$directory"/*; do
		[ -d "$entry" ] && continue
		[ "$(echo -n "$entry" | tail -c3)" = ".py" ] || continue
		[ "$entry" = "__init__.py" ] && continue
		run_test_parallel "$interpreter" "$entry"
		check_job_failure && return
	done
	# Recurse into subdirectories
	for entry in "$directory"/*; do
		[ -d "$entry" ] || continue
		run_test_directory "$interpreter" "$entry"
	done
	infomsg "<<< leaving $prettydir"
}

# $1=interpreter
warn_skipped()
{
	local interpreter="$1"

	warnmsg "=== WARNING: '$interpreter' interpreter not found. Test skipped."
	warnmsg
}

__build_cython()
{
	local cython="$1"
	local python="$2"

	have_prog "$cython" && have_prog "$python" || {
		warnmsg "=== WARNING: Cannot build $cython modules"
		return 1
	}

	(
		infomsg "=== Building awlsim $cython modules with $python"
		cd "$rootdir" || die "cd to $rootdir failed"
		CFLAGS="-O0" CPPFLAGS= CXXFLAGS="-O0" LDFLAGS= \
			AWLSIM_CYTHON_BUILD=1 \
			AWLSIM_CYTHON_PARALLEL=1 \
			nice -n 5 \
			"$python" ./setup.py build >/dev/null ||\
			die "'$python ./setup.py build' failed"
	) || die

	(
		infomsg "=== Building awlsim $cython test cases with $python"
		cd "$rootdir/tests" || die "cd to $rootdir/tests failed"
		rm -rf build || die "Failed to clean test cases build"
		nice -n 5 \
			"$python" ./setup-cython-tests.py build >/dev/null ||\
			die "'$python ./setup-cython-tests.py build' failed"
	) || die

	return 0
}

build_cython3()
{
	__build_cython cython3 python3
}

# $@=testfiles
do_tests()
{
	cleanup_test_environment

	if [ $opt_quick -eq 0 ]; then
		local all_interp="python3 python2 cython3 pypy3"
		if [ $opt_extended -ne 0 ]; then
			local all_interp="$all_interp jython"
		fi
	else
		local all_interp="python3 python2"
		if [ $opt_extended -ne 0 ]; then
			die "The options --quick and --extended are mutually exclusive."
		fi
	fi

	for interpreter in "$opt_interpreter" $all_interp; do
		[ -z "$interpreter" ] && continue

		cleanup_test_environment

		# Create an interpreter name suitable for use as a path component
		local interpreter_name="$(printf '%s' "$interpreter" | tr '/\\' _)"

		# Check if we should enable coverage tracing
		coverage_enabled=$opt_coverage
		if [ $coverage_enabled -ne 0 ] &&\
		   [ "$interpreter" = "pypy" -o "$interpreter" = "pypy3" ]; then
			# Performance impact of coverage on PyPy is too big.
			# Disable coverage to avoid test failures.
			warnmsg "Disabling code coverage tracing (-c|--coverage) on PyPy due to bad performace."
			coverage_enabled=0
		fi

		# Prepare code coverage directory
		coverage_data_subdir="coverage-$interpreter_name"
		mkdir -p "$tmp_dir/$coverage_data_subdir" || die "Failed to create coverage data dir"

		# Basic interpreter setup. Build Cython modules.
		if [ "$interpreter" = "cython3" ]; then
			have_prog cython3 && have_prog python3 || {
				warn_skipped "$interpreter"
				[ -n "$opt_interpreter" ] && break || continue
			}
			wait_for_all_background_jobs
			build_cython3 || die "Cython3 build failed."
		else
			have_prog "$interpreter" || {
				warn_skipped "$interpreter"
				[ -n "$opt_interpreter" ] && break || continue
			}
		fi

		local interp_ver="$(get_interpreter_version "$interpreter")"
		local interp_ver_dot="$(echo "$interp_ver" | tr ' ' '.')"
		local interp_major="$(echo "$interp_ver" | cut -d' ' -f 1)"
		local interp_minor="$(echo "$interp_ver" | cut -d' ' -f 2)"

		[ -z "$interp_ver" ] &&\
			die "Failed to get '$interpreter' version."
		[ "$interp_major" -eq 2 -a "$interp_minor" -lt 7 ] &&\
			die "'$interpreter' interpreter version '$interp_ver_dot' too old."

		infomsg "=== Running tests with '$interpreter'"
		if [ $# -eq 0 ]; then
			run_test_directory "$interpreter" "$basedir"
		else
			for opt in "$@"; do
				local opt="$(realpath -m --no-symlinks "$opt")"
				if [ -d "$opt" ]; then
					run_test_directory "$interpreter" "$opt"
				else
					run_test_parallel "$interpreter" "$opt"
				fi
				check_job_failure && break
			done
		fi
		infomsg

		check_job_failure && break

		# Generate code coverage report
		if [ $coverage_enabled -ne 0 ]; then
			# Wait for background jobs to finish
			wait_for_all_background_jobs

			if [ $global_retval -eq 0 ]; then
				infomsg "\nGenerating code coverage report..."
				local reportbase="$rootdir/code-coverage-report"
				local reportdir="$reportbase/awlsim-coverage-$interpreter_name"
				rm -rf "$reportdir"
				"$rootdir/awlsim-covreport" \
					"$reportdir" \
					"$tmp_dir/$coverage_data_subdir/" ||\
					die "Failed to generate code coverage report."
			fi
		fi

		[ -n "$opt_interpreter" ] && break
	done

	# Wait for background jobs to finish
	wait_for_all_background_jobs

	# Print summary
	if [ $global_retval -eq 0 ]; then
		infomsg
		infomsg -n "All tests succeeded"
	else
		errormsg
		errormsg -n "Some tests FAILED"
	fi
	if [ -n "$opt_interpreter" ]; then
		infomsg " (with interpreter '$opt_interpreter')"
	else
		if [ $opt_quick -eq 0 ]; then
			if [ $opt_extended -eq 0 ]; then
				infomsg " (full run)"
			else
				infomsg " (extended run)"
			fi
		else
			infomsg " (quick run)"
		fi
	fi
}

show_help()
{
	infomsg "awlsim unit test script"
	infomsg
	infomsg "Usage: run.sh [OPTIONS] [testdirectory/testscript.awl/.awlpro/.sh/.py]"
	infomsg
	infomsg "Options:"
	infomsg " -i|--interpreter INTER        Use INTER as interpreter for the tests"
	infomsg " -j|--jobs NR                  Set the number of jobs to run in parallel."
	infomsg "                               0 means number-of-CPUs"
	infomsg "                               Default: 0"
	infomsg " -q|--quick                    Only run python2 and python3 tests"
	infomsg " -g|--no-gui                   Avoid tests that need GUI libraries"
	infomsg " -x|--extended                 Run tests on additional interpreters"
	infomsg " -n|--renice NICENESS          Renice by NICENESS. Defaults to 10."
	infomsg " -Q|--quiet                    Less messages"
	infomsg " -L|--loglevel                 Default log level."
	infomsg " -l|--loop COUNT               Number of test loops to execute."
	infomsg "                               Default: 1"
	infomsg "                               Set to 0 for infinite looping."
	infomsg " -c|--coverage                 Enable code coverage tracing."
}
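
# Illustrative invocations (the test path below is hypothetical):
#   ./tests/run.sh                     # full run with all default interpreters
#   ./tests/run.sh -q -j 4             # quick run (python3/python2), 4 parallel jobs
#   ./tests/run.sh -i pypy3 tests/some_subdir/some_test.awl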

tmp_dir="/tmp/awlsim-test-$$"
rm -rf "$tmp_dir" >/dev/null 2>&1
if ! mkdir -p "$tmp_dir" >/dev/null 2>&1; then
	tmp_dir="$basedir/.tmp/awlsim-test-$$"
	rm -rf "$tmp_dir" >/dev/null 2>&1
	mkdir -p "$tmp_dir" || die "Failed to create temp dir '$tmp_dir'"
fi

trap cleanup_and_exit INT TERM
trap cleanup EXIT

test_fail_file="$(maketemp fail)"
port_alloc_file="$(maketemp port)"
jobs_tmp_file="$(maketemp jobs)"
touch "${port_alloc_file}.lock"
echo 4096 > "$port_alloc_file" || die "Failed to initialize port file"

have_prog file || die "Program 'file' not found."

opt_interpreter=
opt_quick=0
opt_nogui=0
opt_extended=0
opt_renice=
opt_jobs=0
opt_loglevel=2
opt_loop=1
opt_coverage=0

while [ $# -ge 1 ]; do
	[ "$(printf '%s' "$1" | cut -c1)" != "-" ] && break

	case "$1" in
	-h|--help)
		show_help
		exit 0
		;;
	-i|--interpreter)
		shift
		opt_interpreter="$1"
		have_prog "$opt_interpreter" ||\
			die "Interpreter '${opt_interpreter}' not found"
		;;
	-j|--jobs)
		shift
		opt_jobs="$1"
		;;
	-q|--quick)
		opt_quick=1
		;;
	-g|--no-gui)
		opt_nogui=1
		;;
	-x|--extended)
		opt_extended=1
		;;
	-n|--renice)
		shift
		opt_renice="$1"
		;;
	-Q|--quiet)
		export AWLSIM_TEST_QUIET=1
		;;
	-L|--loglevel)
		shift
		opt_loglevel="$1"
		;;
	-l|--loop)
		shift
		opt_loop="$1"
		;;
	-c|--coverage)
		opt_coverage=1
		;;
	*)
		errormsg "Unknown option: $1"
		exit 1
		;;
	esac
	shift
done

[ -z "$opt_jobs" -o -n "$(printf '%s' "$opt_jobs" | tr -d '[0-9]')" ] &&\
	die "--jobs: '$opt_jobs' is not a positive integer number."
if [ $opt_jobs -eq 0 ]; then
	opt_jobs="$(getconf _NPROCESSORS_ONLN)"
	opt_jobs="$(expr $opt_jobs + 2)"
fi
[ -z "$opt_jobs" ] &&\
	die "Could not detect number of CPUs."

if [ -z "$opt_loop" -o -n "$(printf '%s' "$opt_loop" | tr -d '[0-9]')" ] || [ $opt_loop -le 0 ]; then
	opt_loop=infinite
fi


do_renice()
{
	local niceness="$1"
	local pid="$2"

	renice "$niceness" "$pid" >/dev/null
}

adjust_niceness()
{
	local pid="$1"

	if [ -n "$opt_renice" ]; then
		do_renice "$opt_renice" "$pid" || die "Failed to renice"
	else
		# Try to renice. Ignore failure.
		do_renice 10 "$pid"
	fi
}


# Run the tests
global_retval=0
loop_iteration=0
while [ "$opt_loop" = "infinite" ] || [ $opt_loop -gt 0 ]; do
	infomsg "Running test loop iteration $(expr "$loop_iteration" + 1)"

	do_tests "$@"

	if [ $global_retval -ne 0 ]; then
		break
	fi
	if [ "$opt_loop" != "infinite" ]; then
		opt_loop="$(expr "$opt_loop" - 1)"
	fi
	loop_iteration="$(expr "$loop_iteration" + 1)"
done

exit $global_retval