#!/bin/sh
#
# Common code used to run regression tests for usr.bin/make.
#
# $FreeBSD: head/tools/regression/usr.bin/make/common.sh 201526 2010-01-04 18:57:22Z obrien $

#
# Output a message to stderr and exit with an error.
#
fatal()
{
	# Use the shell's own fd-2 redirection; /dev/stderr is not present
	# on all systems and fails when fd 2 is not an open device node.
	echo "fatal: $*" >&2
	exit 1
}

#
# Check whether the working directory exists - it must.
#
ensure_workdir()
{
	# Quote the expansion so a path containing whitespace does not
	# break the test expression.
	if [ ! -d "${WORK_DIR}" ] ; then
		fatal "working directory ${WORK_DIR} does not exist."
	fi
}

#
# Make sure all tests have been run.  A test counts as run only when
# all three of its result files (status, stdout, stderr) exist in the
# output directory.  Exits with an error if any non-skipped test has
# not been run yet.
#
ensure_run()
{
	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	FAIL=
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			# Quote the paths; the message typo "no yet run"
			# is also fixed here ("not yet run").
			if [ ! -f "${OUTPUT_DIR}/status.${N}" -o \
			     ! -f "${OUTPUT_DIR}/stdout.${N}" -o \
			     ! -f "${OUTPUT_DIR}/stderr.${N}" ] ; then
				echo "Test ${SUBDIR}/${N} not yet run"
				FAIL=yes
			fi
		fi
		N=$((N + 1))
	done

	if [ ! -z "${FAIL}" ] ; then
		exit 1
	fi
}

#
# Print the usage message for this test driver on stdout.
#
print_usage()
{
	cat <<EOF
Usage: sh -v -m <path> -w <dir> $0 command(s)
 setup	- setup working directory
 run	- run the tests
 show	- show test results
 compare	- compare actual and expected results
 diff	- diff actual and expected results
 reset	- reset the test to its initial state
 clean	- delete working and output directory
 test	- setup + run + compare
 prove	- setup + run + compare + clean
 desc	- print short description
 update	- update the expected results with the current results
 help	- show this information
EOF
}

#
# Return 0 if we should skip the test. 1 otherwise
#
skip_test()
{
	# Test <n> is skipped when TEST_<n>_SKIP is set to a non-empty value.
	eval skip=\${TEST_${1}_SKIP}
	test -n "${skip}"
}

#
# Common code for setup and reset: populate the working directory from
# the test's declarative TEST_* variables.  All of the TEST_* lists are
# processed as whitespace-separated pairs via "set --"; values therefore
# must not contain embedded whitespace (except where single quotes are
# used for TEST_TOUCH, see below).
#
common_setup()
{
	#
	# If a Makefile exists in the source directory - copy it over
	# (but never clobber one already present in the working directory).
	#
	if [ -e Makefile -a ! -e ${WORK_DIR}/Makefile ] ; then
		cp Makefile ${WORK_DIR}/Makefile
	fi

	#
	# If the TEST_MAKE_DIRS variable is set, create those directories.
	# The value is pairs of directory name and mode; an already
	# existing directory just has its mode adjusted.
	#
	set -- ${TEST_MAKE_DIRS}
	while [ $# -ne 0 ] ; do
		if [ ! -d ${WORK_DIR}/${1} ] ; then
			mkdir -p -m ${2} ${WORK_DIR}/${1}
		else
			chmod ${2} ${WORK_DIR}/${1}
		fi
		shift ; shift
	done

	#
	# If the TEST_COPY_FILES variable is set, copy those files over to
	# the working directory. The value is assumed to be pairs of
	# filenames and modes.  The mode is (re)applied even when the
	# file already exists and the copy is skipped.
	#
	set -- ${TEST_COPY_FILES}
	while [ $# -ne 0 ] ; do
		if [ ! -e ${WORK_DIR}/${1} ] ; then
			cp ${1} ${WORK_DIR}/${1}
		fi
		chmod ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# If the TEST_TOUCH variable is set, it is taken to be a list
	# of pairs of filenames and arguments to touch(1). The arguments
	# to touch must be surrounded by single quotes if there are more
	# than one argument.  The double eval (on "set --" and on the
	# touch call) is what makes the quoting work.
	#
	eval set -- ${TEST_TOUCH}
	while [ $# -ne 0 ] ; do
		eval touch ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# Now create links; TEST_LINKS is pairs of existing file and
	# link name, both relative to the working directory.
	#
	eval set -- ${TEST_LINKS}
	while [ $# -ne 0 ] ; do
		eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
		shift ; shift
	done
}

#
# Setup the test. This creates the working and output directories and
# populates them with files. If there is a setup_test() function - call it.
#
eval_setup()
{
	# Refuse to clobber an existing working directory; the user may
	# be working on a test in there.
	if [ -d ${WORK_DIR} ] ; then
		fatal "working directory ${WORK_DIR} already exists."
	fi

	# Create a fresh working directory and an empty output directory.
	mkdir -p ${WORK_DIR}
	rm -rf ${OUTPUT_DIR}
	mkdir -p ${OUTPUT_DIR}

	# Populate the working directory from the TEST_* variables ...
	common_setup

	# ... and finally give the test its own setup hook.
	setup_test
}

#
# Default setup_test function does nothing. This may be overriden by
# the test.
#
setup_test()
{
	# An empty compound command is a syntax error in sh/bash; use the
	# no-op builtin as the body.
	:
}

#
# Reset the test. Here we need to rely on information from the test.
# We execute the same steps as in the setup, but try not to clobber
# existing files.
# All files and directories that are listed in the TEST_CLEAN_FILES
# variable are removed. Then the TEST_TOUCH list is executed and finally
# the reset_test() function called if it exists.
#
eval_reset()
{
	ensure_workdir

	# Discard all previous test output.
	rm -rf ${OUTPUT_DIR}/*

	# Re-run the shared setup steps (copies, dirs, touches, links).
	common_setup

	# Remove everything the test declared as disposable.
	for junk in ${TEST_CLEAN_FILES} ; do
		rm -rf ${WORK_DIR}/${junk}
	done

	# Give the test its own reset hook.
	reset_test
}

#
# Default reset_test function does nothing. This may be overriden by
# the test.
#
reset_test()
{
	# An empty compound command is a syntax error in sh/bash; use the
	# no-op builtin as the body.
	:
}

#
# Clean the test. This simply removes the working and output directories.
#
eval_clean()
{
	#
	# If you have special cleaning needs, provide a 'cleanup' shell script.
	#
	if [ -n "${TEST_CLEANUP}" ] ; then
		. ${SRC_DIR}/cleanup
	fi
	# Quote the expansions so directories whose paths contain
	# whitespace are removed as a single argument.
	rm -rf "${WORK_DIR}"
	rm -rf "${OUTPUT_DIR}"
}

#
# Run all tests.  Each test executes in a subshell inside the working
# directory with stdout/stderr captured into the output directory; the
# exit status is recorded in status.<n>.
#
eval_run()
{
	ensure_workdir

	# Default to a single test when the test did not set TEST_N.
	: "${TEST_N:=1}"

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		skip_test ${N} || (
			cd ${WORK_DIR} ;
			exec 1>${OUTPUT_DIR}/stdout.${N} 2>${OUTPUT_DIR}/stderr.${N}
			run_test ${N}
			echo $? >${OUTPUT_DIR}/status.${N}
		)
		N=$((N + 1))
	done
}

#
# Default run_test() function.  It can be replaced by the
# user specified regression test. The argument to this function is
# the test number.
#
run_test()
{
	# TEST_<n> holds the make arguments for test <n>; default to the
	# target name "test<n>" when it is unset.
	eval args=\${TEST_${1}-test${1}}
	# $args is intentionally unquoted: it may contain several words
	# that must be passed as separate arguments.
	${MAKE_PROG} $args
}

#
# Show test results: for every non-skipped test print the recorded
# exit status, stdout and stderr.
#
eval_show()
{
	ensure_workdir

	# Default to a single test when the test did not set TEST_N.
	: "${TEST_N:=1}"

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		skip_test ${N} || {
			echo "=== Test ${N} Status =================="
			cat ${OUTPUT_DIR}/status.${N}
			echo ".......... Stdout .................."
			cat ${OUTPUT_DIR}/stdout.${N}
			echo ".......... Stderr .................."
			cat ${OUTPUT_DIR}/stderr.${N}
		}
		N=$((N + 1))
	done
}

#
# Compare results with expected results and report them in TAP
# (Test Anything Protocol) format as consumed by prove(1): first the
# plan line "1..N", then one "ok"/"not ok" line per test.
#
eval_compare()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	# TAP plan line.
	echo "1..${TEST_N}"
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		fail=
		todo=
		if ! skip_test ${N} ; then
			# Accumulate the names of the mismatching channels.
			do_compare stdout ${N} || fail="${fail}stdout "
			do_compare stderr ${N} || fail="${fail}stderr "
			do_compare status ${N} || fail="${fail}status "
			eval todo=\${TEST_${N}_TODO}
		fi
		# Build the TAP line piecewise with echo -n; the order of
		# these conditionals defines the exact output format.
		if [ ! -z "$fail" ]; then
			echo -n "not "
		fi
		echo -n "ok ${N} ${SUBDIR}/${N}"
		if [ ! -z "$fail" -o ! -z "$todo" ]; then
			echo -n " # "
		fi
		if [ ! -z "$todo" ] ; then
			echo -n "TODO $todo; "
		fi
		if [ ! -z "$fail" ] ; then
			echo "reason: ${fail}"
		fi
		# Terminates the line in the non-failing case; note that a
		# failing test therefore gets an extra blank line after the
		# "reason:" line.
		echo
		N=$((N + 1))
	done
}

#
# Check if the test result is the same as the expected result.
#
# $1	Input file (stdout, stderr or status)
# $2	Test number
#
# Returns 0 when the actual output matches the expected file; a missing
# expected file counts as a mismatch.
#
do_compare()
{
	local expected result
	expected="expected.$1.$2"
	result="${OUTPUT_DIR}/$1.$2"

	[ -f $expected ] || return 1	# FAIL: nothing to compare against
	diff -q $expected $result >/dev/null 2>&1
}

#
# Diff current and expected results for every non-skipped test.
#
eval_diff()
{
	ensure_workdir
	ensure_run

	# Default to a single test when the test did not set TEST_N.
	: "${TEST_N:=1}"

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			FAIL=
			for chan in stdout stderr status ; do
				do_diff ${chan} ${N}
			done
		fi
		N=$((N + 1))
	done
}

#
# Show a unified diff between the expected and the actual result.
#
# $1	Input file (stdout, stderr or status)
# $2	Test number
#
do_diff()
{
	local expected result
	expected="expected.$1.$2"
	result="${OUTPUT_DIR}/$1.$2"

	# Announce the comparison even when the expected file is absent.
	echo diff -u $expected $result
	if [ ! -f $expected ]; then
		echo "${expected} does not exist"
	else
		diff -u $expected $result
	fi
}

#
# Update the expected results from the current results of every
# non-skipped test.
#
eval_update()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	# Note: the stray "FAIL=" that was copied here from ensure_run()
	# has been removed; nothing in this function reads it.
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			cp "${OUTPUT_DIR}/stdout.${N}" "expected.stdout.${N}"
			cp "${OUTPUT_DIR}/stderr.${N}" "expected.stderr.${N}"
			cp "${OUTPUT_DIR}/status.${N}" "expected.status.${N}"
		fi
		N=$((N + 1))
	done
}

#
# Print the test's short description, prefixed by its sub-directory.
#
eval_desc()
{
	printf '%s: %s\n' "${SUBDIR}" "${DESC}"
}

#
# Convenience command: set up, run and evaluate the test in one go.
#
eval_test()
{
	eval_setup
	eval_run
	eval_compare
}

#
# Convenience command for prove(1): like 'test' but also cleans up
# the working and output directories afterwards.
#
eval_prove()
{
	eval_setup
	eval_run
	eval_compare
	eval_clean
}

#
# Main function. Execute the command(s) given on the command line.
#
eval_cmd()
{
	# Default to 'prove' when no commands are given.
	[ $# -eq 0 ] && set -- prove

	for cmd
	do
		case ${cmd} in
		setup | run | compare | diff | clean | reset | show | \
		test | prove | desc | update)
			# Dispatch to the matching eval_* function.
			eval eval_${cmd}
			;;
		*)
			# 'help' and anything unrecognized print the usage.
			print_usage
			;;
		esac
	done
}

##############################################################################
#
# Main code
#

#
# Parse command line arguments.  Use the getopts builtin instead of the
# legacy external getopt(1), which mangles option arguments that contain
# whitespace.  Same flags and same exit code (2) on a bad option.
#
while getopts m:w:v opt; do
	case "${opt}" in
	m)
		# Path of the make(1) binary under test.
		MAKE_PROG="${OPTARG}"
		;;
	w)
		# Base directory for working files.
		WORK_BASE="${OPTARG}"
		;;
	v)
		VERBOSE=1
		;;
	*)
		echo 'Usage: ...'
		exit 2
		;;
	esac
done
shift $((OPTIND - 1))

#
# Determine our sub-directory, i.e. the path of the current test
# directory relative to the directory that holds common.sh.
#
SRC_DIR=$(pwd)
SRC_BASE=$(while [ ! -f common.sh ] ; do
	if [ "$(pwd)" = "/" ] ; then
		# Guard against an endless loop: the original code kept
		# executing "cd .." forever when common.sh could not be
		# found anywhere up the tree.
		break
	fi
	cd ..
done ; pwd)
SUBDIR=$(echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@")

#
# Construct the working directory and the output directory from the
# (possibly user supplied) base directory.
#
if [ -z "${WORK_BASE}" ] ; then
	WORK_BASE="/tmp/${USER}.make.test"
fi
WORK_DIR="${WORK_BASE}/${SUBDIR}"
OUTPUT_DIR="${WORK_DIR}.OUTPUT"

#
# The make(1) binary to test; default to the system make.
#
if [ -z "${MAKE_PROG}" ] ; then
	MAKE_PROG="/usr/bin/make"
fi
