#!/bin/sh
#
# Common code used to run regression tests for usr.bin/make.
#
# $FreeBSD: head/tools/regression/usr.bin/make/common.sh 146855 2005-06-01 11:25:38Z harti $

#
# Output a message and exit with an error.
#
fatal()
{
	echo "fatal: $*" >&2
	exit 1
}

#
# Check whether the working directory exists - it must.
#
ensure_workdir()
{
	if [ ! -d ${WORK_DIR} ] ; then
		fatal "working directory ${WORK_DIR} does not exist."
	fi
}

#
# Make sure all tests have been run
#
ensure_run()
{
	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	FAIL=
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			if [ ! -f ${OUTPUT_DIR}/status.${N} -o \
			     ! -f ${OUTPUT_DIR}/stdout.${N} -o \
			     ! -f ${OUTPUT_DIR}/stderr.${N} ] ; then
				echo "Test ${SUBDIR}/${N} not yet run"
				FAIL=yes
			fi
		fi
		N=$((N + 1))
	done

	if [ ! -z "${FAIL}" ] ; then
		exit 1
	fi
}

#
# Output usage message.
#
print_usage()
{
	echo "Usage: sh $0 [-v] [-m <path>] [-w <dir>] command(s)"
	echo " setup	- setup working directory"
	echo " run	- run the tests"
	echo " show	- show test results"
	echo " compare	- compare actual and expected results"
	echo " diff	- diff actual and expected results"
	echo " reset	- reset the test to its initial state"
	echo " clean	- delete working and output directory"
	echo " test	- setup + run + compare"
	echo " prove	- setup + run + compare + clean"
	echo " desc	- print short description"
	echo " update	- update the expected results with the current results"
	echo " help	- show this information"
}
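
#
# Example invocation of a per-test script that sources this file (script
# name and paths are hypothetical):
#
#	sh ./test.sh -m /usr/obj/usr.bin/make/make -w /tmp/maketest test
#
# -m selects the make binary (MAKE_PROG), -w the base working directory
# (WORK_BASE); if no command is given, 'prove' is run.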

#
# Return 0 if we should skip the test, 1 otherwise.
#
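# A test is skipped by defining TEST_<n>_SKIP in the test script; only
# its being non-empty matters. A hypothetical example:
#
#	TEST_3_SKIP="needs a case-insensitive file system"
#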
skip_test()
{
	eval skip=\${TEST_${1}_SKIP}
	if [ -z "${skip}" ] ; then
		return 1
	else
		return 0
	fi
}

#
# Common function for setup and reset.
#
common_setup()
{
	#
	# If a Makefile exists in the source directory - copy it over
	#
	if [ -e Makefile -a ! -e ${WORK_DIR}/Makefile ] ; then
		cp Makefile ${WORK_DIR}/Makefile
	fi

	#
	# If the TEST_MAKE_DIRS variable is set, create those directories.
	# The value is assumed to be pairs of directory names and modes.
	#
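	# A hypothetical example creating two directories with explicit modes:
	#
	#	TEST_MAKE_DIRS="obj 755 obj/sub 750"
	#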
	set -- ${TEST_MAKE_DIRS}
	while [ $# -ne 0 ] ; do
		if [ ! -d ${WORK_DIR}/${1} ] ; then
			mkdir -p -m ${2} ${WORK_DIR}/${1}
		else
			chmod ${2} ${WORK_DIR}/${1}
		fi
		shift ; shift
	done

	#
	# If the TEST_COPY_FILES variable is set, copy those files over to
	# the working directory. The value is assumed to be pairs of
	# filenames and modes.
	#
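	# A hypothetical example copying two files and setting their modes:
	#
	#	TEST_COPY_FILES="Makefile.inc 644 helper.sh 755"
	#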
	set -- ${TEST_COPY_FILES}
	while [ $# -ne 0 ] ; do
		if [ ! -e ${WORK_DIR}/${1} ] ; then
			cp ${1} ${WORK_DIR}/${1}
		fi
		chmod ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# If the TEST_TOUCH variable is set, it is taken to be a list
	# of pairs of filenames and arguments to touch(1). The arguments
	# to touch must be surrounded by single quotes if there is more
	# than one argument.
	#
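	# A hypothetical example: give 'target' an old timestamp and merely
	# touch 'source' (the empty quotes mean "no extra arguments"):
	#
	#	TEST_TOUCH="target '-t 200501010000' source ''"
	#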
	eval set -- ${TEST_TOUCH}
	while [ $# -ne 0 ] ; do
		eval touch ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# Now create the links listed in TEST_LINKS. The value is assumed
	# to be pairs of existing filenames and link names.
	#
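	# A hypothetical example hard-linking an existing file to a second
	# name inside the working directory:
	#
	#	TEST_LINKS="master.c copy.c"
	#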
	eval set -- ${TEST_LINKS}
	while [ $# -ne 0 ] ; do
		eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
		shift ; shift
	done
}

#
# Set up the test. This creates the working and output directories and
# populates them with files. If the test defines a setup_test() function,
# it is called.
#
eval_setup()
{
	#
	# Check whether the working directory exists. If it does, exit
	# fatally so that we don't clobber a test the user is working on.
	#
	if [ -d ${WORK_DIR} ] ; then
		fatal "working directory ${WORK_DIR} already exists."
	fi

	#
	# Now create it and the output directory
	#
	mkdir -p ${WORK_DIR}
	rm -rf ${OUTPUT_DIR}
	mkdir -p ${OUTPUT_DIR}

	#
	# Common stuff
	#
	common_setup

	#
	# Finally, execute the test's setup function if it exists.
	#
	setup_test
}

#
# Default setup_test function does nothing. This may be overridden by
# the test.
#
setup_test()
{
	:
}

#
# Reset the test. Here we need to rely on information from the test.
# We execute the same steps as in the setup, but try not to clobber
# existing files.
# All files and directories that are listed in the TEST_CLEAN_FILES
# variable are removed. Then the TEST_TOUCH list is executed and finally
# the reset_test() function is called if it exists.
#
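# A hypothetical example removing two generated files on reset (the names
# are taken relative to ${WORK_DIR}):
#
#	TEST_CLEAN_FILES="main.o build.log"
#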
eval_reset()
{
	ensure_workdir

	#
	# Clean the output directory
	#
	rm -rf ${OUTPUT_DIR}/*

	#
	# Common stuff
	#
	common_setup

	#
	# Remove files.
	#
	for f in ${TEST_CLEAN_FILES} ; do
		rm -rf ${WORK_DIR}/${f}
	done

	#
	# Execute test's function
	#
	reset_test
}

#
# Default reset_test function does nothing. This may be overridden by
# the test.
#
reset_test()
{
	:
}

#
# Clean the test. This simply removes the working and output directories.
#
eval_clean()
{
	rm -rf ${WORK_DIR}
	rm -rf ${OUTPUT_DIR}
}

#
# Run the test.
#
eval_run()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			( cd ${WORK_DIR} ;
			  exec 1>${OUTPUT_DIR}/stdout.${N} 2>${OUTPUT_DIR}/stderr.${N}
			  run_test ${N}
			  echo $? >${OUTPUT_DIR}/status.${N}
			)
		fi
		N=$((N + 1))
	done
}

#
# Default run_test() function. It can be replaced by the
# user-specified regression test. The argument to this function is
# the test number.
#
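# If TEST_<n> is not set, the default make target 'test<n>' is used, so
# test 1 simply runs "${MAKE_PROG} test1". A hypothetical override that
# gives test 2 its own makefile and target:
#
#	TEST_2="-f Makefile.two two"
#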
run_test()
{
	eval args=\${TEST_${1}-test${1}}
	${MAKE_PROG} $args
}

#
# Show test results.
#
eval_show()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			echo "=== Test ${N} Status =================="
			cat ${OUTPUT_DIR}/status.${N}
			echo ".......... Stdout .................."
			cat ${OUTPUT_DIR}/stdout.${N}
			echo ".......... Stderr .................."
			cat ${OUTPUT_DIR}/stderr.${N}
		fi
		N=$((N + 1))
	done
}

#
# Compare results with expected results
#
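# The output is in the TAP format consumed by prove(1): a plan line
# followed by one "ok"/"not ok" line per test, for example (illustrative
# output only):
#
#	1..2
#	ok 1 some/test/1
#	not ok 2 some/test/2 # reason: stdout status
#
# Setting TEST_<n>_TODO to a short explanation marks a test as an
# expected failure; the text is emitted after a "# TODO" marker.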
eval_compare()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	echo "1..${TEST_N}"
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		fail=
		todo=
		if ! skip_test ${N} ; then
			do_compare stdout ${N} || fail="${fail}stdout "
			do_compare stderr ${N} || fail="${fail}stderr "
			do_compare status ${N} || fail="${fail}status "
			eval todo=\${TEST_${N}_TODO}
		fi
		if [ ! -z "$fail" ]; then
			echo -n "not "
		fi
		echo -n "ok ${N} ${SUBDIR}/${N}"
		if [ ! -z "$fail" -o ! -z "$todo" ]; then
			echo -n " # "
		fi
		if [ ! -z "$todo" ] ; then
			echo -n "TODO $todo; "
		fi
		if [ ! -z "$fail" ] ; then
			echo "reason: ${fail}"
		fi
		echo
		N=$((N + 1))
	done
}

#
# Check if the test result is the same as the expected result.
#
# $1	Input file
# $2	Test number
#
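# For example, "do_compare stdout 1" compares expected.stdout.1 (kept in
# the test's source directory) with ${OUTPUT_DIR}/stdout.1; a missing
# expected file is treated as a failure.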
do_compare()
{
	local EXPECTED RESULT
	EXPECTED="expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	if [ -f $EXPECTED ]; then
		diff -q $EXPECTED $RESULT 1>/dev/null 2>/dev/null
		return $?
	else
		return 1	# FAIL
	fi
}

#
# Diff current and expected results
#
eval_diff()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			FAIL=
			do_diff stdout ${N}
			do_diff stderr ${N}
			do_diff status ${N}
		fi
		N=$((N + 1))
	done
}

#
# Show the differences between the test result and the expected result.
#
# $1	Input file
# $2	Test number
#
do_diff()
{
	local EXPECTED RESULT
	EXPECTED="expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	echo diff -u $EXPECTED $RESULT
	if [ -f $EXPECTED ]; then
		diff -u $EXPECTED $RESULT
	else
		echo "${EXPECTED} does not exist"
	fi
}

#
# Update expected results
#
eval_update()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	FAIL=
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			cp ${OUTPUT_DIR}/stdout.${N} expected.stdout.${N}
			cp ${OUTPUT_DIR}/stderr.${N} expected.stderr.${N}
			cp ${OUTPUT_DIR}/status.${N} expected.status.${N}
		fi
		N=$((N + 1))
	done
}

#
# Print description
#
eval_desc()
{
	echo "${SUBDIR}: ${DESC}"
}

#
# Run the test
#
eval_test()
{
	eval_setup
	eval_run
	eval_compare
}

#
# Run the test for prove(1)
#
eval_prove()
{
	eval_setup
	eval_run
	eval_compare
	eval_clean
}

#
# Main function. Execute the command(s) on the command line.
#
eval_cmd()
{
	if [ $# -eq 0 ] ; then
		# if no arguments given default to 'prove'
		set -- prove
	fi

	for i
	do
		case $i in

		setup | run | compare | diff | clean | reset | show | \
		test | prove | desc | update)
			eval eval_$i
			;;
		help | *)
			print_usage
			;;
		esac
	done
}

##############################################################################
#
# Main code
#

#
# Parse command line arguments.
#
args=`getopt m:w:v $*`
if [ $? -ne 0 ]; then
	print_usage
	exit 2
fi
set -- $args
for i; do
	case "$i" in
	-m)
		MAKE_PROG="$2"
		shift
		shift
		;;
	-w)
		WORK_BASE="$2"
		shift
		shift
		;;
	-v)
		VERBOSE=1
		shift
		;;
	--)
		shift
		break
		;;
	esac
done

#
# Determine our sub-directory. Argh.
#
SRC_DIR=`pwd`
SRC_BASE=`while [ ! -f common.sh ] ; do cd .. ; done ; pwd`
SUBDIR=`echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@"`

#
# Construct working directory
#
WORK_BASE=${WORK_BASE:-"/tmp/$USER.make.test"}
WORK_DIR=${WORK_BASE}/${SUBDIR}
OUTPUT_DIR=${WORK_DIR}.OUTPUT
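
# For a test living in, say, ${SRC_BASE}/archives/fmt_44bsd (a hypothetical
# example), the defaults above give
# WORK_DIR=/tmp/$USER.make.test/archives/fmt_44bsd and
# OUTPUT_DIR=/tmp/$USER.make.test/archives/fmt_44bsd.OUTPUT.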

#
# Make to use
#
MAKE_PROG=${MAKE_PROG:-/usr/bin/make}
538