common.sh: deleted lines are from revision 145620, added lines are from revision 146822
1#!/bin/sh
2#
3# Common code used to run regression tests for usr.bin/make.
4#
1#!/bin/sh
2#
3# Common code used to run regression tests for usr.bin/make.
4#
5# $FreeBSD: head/tools/regression/usr.bin/make/common.sh 145620 2005-04-28 13:20:48Z harti $
5# $FreeBSD: head/tools/regression/usr.bin/make/common.sh 146822 2005-05-31 14:13:07Z harti $
6
7#
6
7#
8# Output usage message.
8# Output a message and exit with an error.
9#
9#
10print_usage()
10fatal()
11{
11{
12 echo "Usage: $0 command"
13 echo " clean - remove temp files (get initial state)"
14 echo " compare - compare result of test to expected"
15 echo " desc - print description of test"
16 echo " diff - print diffs between results and expected"
17 echo " harness - produce output suitable for Test::Harness"
18 echo " run - run the {test, compare, clean}"
19 echo " test - run test case"
20 echo " update - update the expected with current results"
12 echo "fatal: $*" >/dev/stderr
13 exit 1
21}
22
23#
14}
15
16#
24# Check if the test result is the same as the expected result.
17# Check whether the working directory exists - it must.
25#
18#
26# $1 Input file
19ensure_workdir()
20{
21 if [ ! -d ${WORK_DIR} ] ; then
22 fatal "working directory ${WORK_DIR} does not exist."
23 fi
24}
25
27#
26#
28hack_cmp()
27# Make sure all tests have been run
28#
29ensure_run()
29{
30{
30 local EXPECTED RESULT
31 EXPECTED="expected.$1"
32 RESULT=${WORK_DIR}/$1
31 if [ -z "${TEST_N}" ] ; then
32 TEST_N=1
33 fi
33
34
34 if [ -f $EXPECTED ]; then
35 diff -q $EXPECTED $RESULT 1> /dev/null 2> /dev/null
36 return $?
37 else
38 return 1 # FAIL
35 FAIL=
36 N=1
37 while [ ${N} -le ${TEST_N} ] ; do
38 eval skip=\${TEST_${N}_SKIP}
39 if [ -z "${skip}" ] ; then
40 if [ ! -f ${OUTPUT_DIR}/status.${N} -o \
41 ! -f ${OUTPUT_DIR}/stdout.${N} -o \
42 ! -f ${OUTPUT_DIR}/stderr.${N} ] ; then
43 echo "Test ${SUBDIR}/${N} not yet run"
44 FAIL=yes
45 fi
46 fi
47 N=$((N + 1))
48 done
49
50 if [ ! -z "${FAIL}" ] ; then
51 exit 1
39 fi
40}
41
42#
52 fi
53}
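
ensure_run expects one status/stdout/stderr triple per test number and honors the
per-test skip variables. As an illustration only (the values below are invented, not
taken from any real test), a test that defines three make runs and skips the second
might set:

    TEST_N=3                                         # number of make invocations in this test
    TEST_2_SKIP="feature not present in this make"   # skip run 2, with a reason for the output
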
54
55#
43# Check if the test result is the same as the expected result.
56# Output usage message.
44#
57#
45# $1 Input file
46#
47hack_diff()
58print_usage()
48{
59{
49 local EXPECTED RESULT
50 EXPECTED="expected.$1"
51 RESULT=${WORK_DIR}/$1
60 echo "Usage: sh -v -m <path> -w <dir> $0 command(s)"
61 echo " setup - setup working directory"
62 echo " run - run the tests"
63 echo " show - show test results"
64 echo " compare - compare actual and expected results"
65 echo " diff - diff actual and expected results"
66 echo " reset - reset the test to its initial state"
67 echo " clean - delete working and output directory"
68 echo " test - setup + run + compare"
69 echo " prove - setup + run + compare + clean"
70 echo " desc - print short description"
71 echo " update - update the expected results with the current results"
72 echo " help - show this information"
73}
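
As a concrete example of that usage line, one plausible invocation is shown below. The
script name and paths are placeholders, and -m/-w are assumed to select the make binary
and the work area, matching the getopt string parsed near the end of this file:

    sh ./test.t -m /usr/obj/usr.bin/make/make -w /tmp/make-tests prove
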
52
74
53 echo diff -u $EXPECTED $RESULT
54 if [ -f $EXPECTED ]; then
55 diff -u $EXPECTED $RESULT
56 return $?
57 else
58 return 1 # FAIL
75#
76# Common function for setup and reset.
77#
78common_setup()
79{
80 #
81 # If a Makefile exists in the source directory - copy it over
82 #
83 if [ -e Makefile -a ! -e ${WORK_DIR}/Makefile ] ; then
84 cp Makefile ${WORK_DIR}/Makefile
59 fi
85 fi
86
87 #
88 # If the TEST_MAKE_DIRS variable is set, create those directories
89 #
90 set -- ${TEST_MAKE_DIRS}
91 while [ $# -ne 0 ] ; do
92 if [ ! -d ${WORK_DIR}/${1} ] ; then
93 mkdir -p -m ${2} ${WORK_DIR}/${1}
94 else
95 chmod ${2} ${WORK_DIR}/${1}
96 fi
97 shift ; shift
98 done
99
100 #
101 # If the TEST_COPY_FILES variable is set, copy those files over to
102 # the working directory. The value is assumed to be pairs of
103 # filenames and modes.
104 #
105 set -- ${TEST_COPY_FILES}
106 while [ $# -ne 0 ] ; do
107 if [ ! -e ${WORK_DIR}/${1} ] ; then
108 cp ${1} ${WORK_DIR}/${1}
109 fi
110 chmod ${2} ${WORK_DIR}/${1}
111 shift ; shift
112 done
113
114 #
115 # If the TEST_TOUCH variable is set, it is taken to be a list
116 # of pairs of filenames and arguments to touch(1). The arguments
117 # to touch must be surrounded by single quotes if there is more
118 # than one argument.
119 #
120 eval set -- ${TEST_TOUCH}
121 while [ $# -ne 0 ] ; do
122 eval touch ${2} ${WORK_DIR}/${1}
123 shift ; shift
124 done
125
126 #
127 # Now create links
128 #
129 eval set -- ${TEST_LINKS}
130 while [ $# -ne 0 ] ; do
131 eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
132 shift ; shift
133 done
60}
61
62#
134}
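
To make the pair-wise formats handled above concrete, a test could define the setup
variables roughly as follows (all file names, modes and timestamps are invented for
illustration):

    TEST_MAKE_DIRS="objdir 755"                  # directory/mode pairs
    TEST_COPY_FILES="sub.mk 644"                 # file/mode pairs, copied from the source dir
    TEST_TOUCH="target.c '-t 200501010101'"      # file/touch-argument pairs
    TEST_LINKS="target.c target_link.c"          # source/target pairs passed to ln
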
135
136#
63# Default setup_test() function.
137# Set up the test. This creates the working and output directories and
138# populates them with files. If there is a setup_test() function, call it.
64#
139#
65# The default function just does nothing.
140eval_setup()
141{
142 #
143 # Check whether the working directory exists. If it does, exit
144 # fatally so that we don't clobber a test the user is working on.
145 #
146 if [ -d ${WORK_DIR} ] ; then
147 fatal "working directory ${WORK_DIR} already exists."
148 fi
149
150 #
151 # Now create it and the output directory
152 #
153 mkdir -p ${WORK_DIR}
154 rm -rf ${OUTPUT_DIR}
155 mkdir -p ${OUTPUT_DIR}
156
157 #
158 # Common stuff
159 #
160 common_setup
161
162 #
163 # Finally, execute the user's setup function if it exists.
164 #
165 setup_test
166}
167
66#
168#
67# Both the variables SRC_BASE WORK_BASE are available.
169# Default setup_test function does nothing. This may be overridden by
170# the test.
68#
69setup_test()
70{
71}
72
73#
171#
172setup_test()
173{
174}
175
176#
74# Default run_test() function. It can be replaced by the
75# user-specified regression test.
177# Reset the test. Here we need to rely on information from the test.
178# We execute the same steps as in the setup, but try not to clobber existing
179# files.
180# All files and directories listed in the TEST_CLEAN_FILES
181# variable are removed. Then the TEST_TOUCH list is executed and finally
182# the reset_test() function is called, if it exists.
76#
183#
77# Both the variables SRC_BASE WORK_BASE are available.
184eval_reset()
185{
186 ensure_workdir
187
188 #
189 # Clean the output directory
190 #
191 rm -rf ${OUTPUT_DIR}/*
192
193 #
194 # Common stuff
195 #
196 common_setup
197
198 #
199 # Remove files.
200 #
201 for f in ${TEST_CLEAN_FILES} ; do
202 rm -rf ${WORK_DIR}/${f}
203 done
204
205 #
206 # Execute test's function
207 #
208 reset_test
209}
210
78#
211#
79# Note: this function executes from a subshell.
212# Default reset_test function does nothing. This may be overridden by
213# the test.
80#
214#
81run_test()
215reset_test()
82{
216{
83 cd ${WORK_DIR}
84 $MAKE_PROG 1> stdout 2> stderr
85 echo $? > status
86}
87
88#
217}
218
219#
89# Default clean routine
220# Clean the test. This simply removes the working and output directories.
90#
221#
91clean_test()
222eval_clean()
92{
223{
224 rm -rf ${WORK_DIR}
225 rm -rf ${OUTPUT_DIR}
93}
94
95#
226}
227
228#
96# Clean working directory
229# Run the test.
97#
230#
98eval_clean()
231eval_run()
99{
232{
100 rm -f ${WORK_DIR}/stdout
101 rm -f ${WORK_DIR}/stderr
102 rm -f ${WORK_DIR}/status
103 clean_test
233 ensure_workdir
234
235 if [ -z "${TEST_N}" ] ; then
236 TEST_N=1
237 fi
238
239 N=1
240 while [ ${N} -le ${TEST_N} ] ; do
241 eval skip=\${TEST_${N}_SKIP}
242 if [ -z "${skip}" ] ; then
243 ( cd ${WORK_DIR} ;
244 exec 1>${OUTPUT_DIR}/stdout.${N} 2>${OUTPUT_DIR}/stderr.${N}
245 run_test ${N}
246 echo $? >${OUTPUT_DIR}/status.${N}
247 )
248 fi
249 N=$((N + 1))
250 done
104}
105
106#
251}
252
253#
254# Default run_test() function. It can be replaced by the
255# user-specified regression test. The argument to this function is
256# the test number.
257#
258run_test()
259{
260 eval args=\${TEST_${1}-test${1}}
261 ${MAKE_PROG} $args
262}
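
The eval picks up an optional per-test argument string TEST_<n> and falls back to the
make target test<n>. A hypothetical two-test directory could therefore drive make like
this (argument values invented for illustration):

    TEST_N=2
    TEST_1="-f Makefile.a"      # test 1 runs: ${MAKE_PROG} -f Makefile.a
    # TEST_2 left unset, so test 2 falls back to: ${MAKE_PROG} test2
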
263
264#
265# Show test results.
266#
267eval_show()
268{
269 ensure_workdir
270
271 if [ -z "${TEST_N}" ] ; then
272 TEST_N=1
273 fi
274
275 N=1
276 while [ ${N} -le ${TEST_N} ] ; do
277 eval skip=\${TEST_${N}_SKIP}
278 if [ -z "${skip}" ] ; then
279 echo "=== Test ${N} Status =================="
280 cat ${OUTPUT_DIR}/status.${N}
281 echo ".......... Stdout .................."
282 cat ${OUTPUT_DIR}/stdout.${N}
283 echo ".......... Stderr .................."
284 cat ${OUTPUT_DIR}/stderr.${N}
285 fi
286 N=$((N + 1))
287 done
288}
289
290#
107# Compare results with expected results
108#
109eval_compare()
110{
291# Compare results with expected results
292#
293eval_compare()
294{
111 hack_cmp stdout || FAIL="stdout $FAIL"
112 hack_cmp stderr || FAIL="stderr $FAIL"
113 hack_cmp status || FAIL="status $FAIL"
295 ensure_workdir
296 ensure_run
114
297
115 if [ ! -z "$FAIL" ]; then
116 FAIL=`echo $FAIL`
117 echo "$SUBDIR: Test failed {$FAIL}"
298 if [ -z "${TEST_N}" ] ; then
299 TEST_N=1
118 fi
300 fi
301
302 echo "1..${TEST_N}"
303 N=1
304 while [ ${N} -le ${TEST_N} ] ; do
305 eval skip=\${TEST_${N}_SKIP}
306 if [ -z "${skip}" ] ; then
307 FAIL=
308 do_compare stdout ${N} || FAIL="${FAIL}stdout "
309 do_compare stderr ${N} || FAIL="${FAIL}stderr "
310 do_compare status ${N} || FAIL="${FAIL}status "
311 if [ ! -z "$FAIL" ]; then
312 echo "not ok ${N} ${SUBDIR}/${N} # reason: ${FAIL}"
313 else
314 echo "ok ${N} ${SUBDIR}/${N}"
315 fi
316 else
317 echo "ok ${N} ${SUBDIR}/${N} # skip: ${skip}"
318 fi
319 N=$((N + 1))
320 done
119}
120
121#
321}
322
323#
122# Compare results with expected results for prove(1)
324# Check if the test result is the same as the expected result.
123#
325#
124eval_hcompare()
326# $1 Input file
327# $2 Test number
328#
329do_compare()
125{
330{
126 FAIL=
127 hack_cmp stdout || FAIL="stdout $FAIL"
128 hack_cmp stderr || FAIL="stderr $FAIL"
129 hack_cmp status || FAIL="status $FAIL"
331 local EXPECTED RESULT
332 EXPECTED="expected.$1.$2"
333 RESULT="${OUTPUT_DIR}/$1.$2"
130
334
131 if [ ! -z "$FAIL" ]; then
132 FAIL=`echo $FAIL`
133 echo "not ok 1 $SUBDIR # reason: {$FAIL}"
335 if [ -f $EXPECTED ]; then
336 diff -q $EXPECTED $RESULT 1>/dev/null 2>/dev/null
337 return $?
134 else
338 else
135 echo "ok 1 $SUBDIR"
339 return 1 # FAIL
136 fi
137}
138
139#
340 fi
341}
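
Given the EXPECTED and RESULT names above, the reference output for each numbered test
lives next to the test script as expected.stdout.<n>, expected.stderr.<n> and
expected.status.<n>; these are the files eval_update regenerates. For test 1 the check
amounts to (a sketch of what do_compare effectively runs):

    diff -q expected.stdout.1 ${OUTPUT_DIR}/stdout.1
    diff -q expected.stderr.1 ${OUTPUT_DIR}/stderr.1
    diff -q expected.status.1 ${OUTPUT_DIR}/status.1
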
342
343#
140# Print description
344# Diff current and expected results
141#
345#
142eval_desc()
346eval_diff()
143{
347{
144 echo -n "$SUBDIR: "
145 desc_test
348 ensure_workdir
349 ensure_run
350
351 if [ -z "${TEST_N}" ] ; then
352 TEST_N=1
353 fi
354
355 N=1
356 while [ ${N} -le ${TEST_N} ] ; do
357 eval skip=\${TEST_${N}_SKIP}
358 if [ -z "${skip}" ] ; then
359 FAIL=
360 do_diff stdout ${N}
361 do_diff stderr ${N}
362 do_diff status ${N}
363 fi
364 N=$((N + 1))
365 done
146}
147
148#
366}
367
368#
149# Prepare and run the test
369# Check if the test result is the same as the expected result.
150#
370#
151eval_test()
371# $1 Input file
372# $2 Test number
373#
374do_diff()
152{
375{
153 [ -d ${WORK_DIR} ] || mkdir -p ${WORK_DIR}
154 if [ -f Makefile ] ; then
155 cp Makefile ${WORK_DIR}
376 local EXPECTED RESULT
377 EXPECTED="expected.$1.$2"
378 RESULT="${OUTPUT_DIR}/$1.$2"
379
380 echo diff -u $EXPECTED $RESULT
381 if [ -f $EXPECTED ]; then
382 diff -u $EXPECTED $RESULT
383 else
384 echo "${EXPECTED} does not exist"
156 fi
385 fi
157 setup_test
158 ( run_test )
159}
160
161#
386}
387
388#
162# Diff current and expected results
389# Update expected results
163#
390#
164eval_diff()
391eval_update()
165{
392{
166 eval_test
167 echo "------------------------"
168 echo "- $SUBDIR"
169 echo "------------------------"
170 hack_diff stdout
171 hack_diff stderr
172 hack_diff status
393 ensure_workdir
394 ensure_run
395
396 if [ -z "${TEST_N}" ] ; then
397 TEST_N=1
398 fi
399
400 FAIL=
401 N=1
402 while [ ${N} -le ${TEST_N} ] ; do
403 eval skip=\${TEST_${N}_SKIP}
404 if [ -z "${skip}" ] ; then
405 cp ${OUTPUT_DIR}/stdout.${N} expected.stdout.${N}
406 cp ${OUTPUT_DIR}/stderr.${N} expected.stderr.${N}
407 cp ${OUTPUT_DIR}/status.${N} expected.status.${N}
408 fi
409 N=$((N + 1))
410 done
173}
174
175#
411}
412
413#
176# Run the test for prove(1)
414# Print description
177#
415#
178eval_harness()
416eval_desc()
179{
417{
180 echo 1..1
181 eval_test
182 eval_hcompare
183 eval_clean
418 echo "${SUBDIR}: ${DESC}"
184}
185
186#
187# Run the test
188#
419}
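
eval_desc relies on the test providing DESC, so a test script is expected to set it
before calling into this file. A minimal sketch of such a script follows; the
description text, relative path and argument passing are illustrative and the real test
scripts may differ:

    DESC="Short description of this regression test."
    . ../common.sh
    eval_cmd $*
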
420
421#
422# Run the test
423#
189eval_run()
424eval_test()
190{
425{
191 eval_test
426 eval_setup
427 eval_run
192 eval_compare
428 eval_compare
193 eval_clean
194}
195
196#
429}
430
431#
197# Update expected results
432# Run the test for prove(1)
198#
433#
199eval_update()
434eval_prove()
200{
435{
201 eval_test
202 cat ${WORK_DIR}/stdout > expected.stdout
203 cat ${WORK_DIR}/stderr > expected.stderr
204 cat ${WORK_DIR}/status > expected.status
436 eval_setup
437 eval_run
438 eval_compare
439 eval_clean
205}
206
207#
440}
441
442#
208# Note: Uses global variable $DIR which might be assigned by
209# the script which sourced this file.
443# Main function. Execute the command(s) on the command line.
210#
211eval_cmd()
212{
213 if [ $# -eq 0 ] ; then
444#
445eval_cmd()
446{
447 if [ $# -eq 0 ] ; then
214 set -- harness
448 # if no arguments are given, default to 'prove'
449 set -- prove
215 fi
216
450 fi
451
217 case $1 in
218 clean|compare|hcompare|desc|diff|harness|run|test|update)
219 eval eval_$1
220 ;;
221 *)
222 print_usage
223 ;;
224 esac
452 for i
453 do
454 case $i in
455
456 setup | run | compare | diff | clean | reset | show | \
457 test | prove | desc | update)
458 eval eval_$i
459 ;;
460 help | *)
461 print_usage
462 ;;
463 esac
464 done
225}
226
465}
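
Because eval_cmd simply iterates over its arguments, several stages can be chained in
one invocation; for instance (hypothetical script name), inspecting a failing test
without cleaning up afterwards:

    sh ./test.t setup run diff
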
466
467##############################################################################
227#
468#
469# Main code
470#
471
472#
228# Parse command line arguments.
229#
230args=`getopt m:w:v $*`
231if [ $? != 0 ]; then
232 echo 'Usage: ...'
233 exit 2
234fi
235set -- $args

--- 22 unchanged lines hidden ---

258
259#
260# Determine our sub-directory. Argh.
261#
262SRC_DIR=`pwd`
263SRC_BASE=`while [ ! -f common.sh ] ; do cd .. ; done ; pwd`
264SUBDIR=`echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@"`
265
473# Parse command line arguments.
474#
475args=`getopt m:w:v $*`
476if [ $? != 0 ]; then
477 echo 'Usage: ...'
478 exit 2
479fi
480set -- $args

--- 22 unchanged lines hidden ---

503
504#
505# Determine our sub-directory. Argh.
506#
507SRC_DIR=`pwd`
508SRC_BASE=`while [ ! -f common.sh ] ; do cd .. ; done ; pwd`
509SUBDIR=`echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@"`
510
511#
512# Construct working directory
513#
266WORK_BASE=${WORK_BASE:-"/tmp/$USER.make.test"}
267WORK_DIR=${WORK_BASE}/${SUBDIR}
514WORK_BASE=${WORK_BASE:-"/tmp/$USER.make.test"}
515WORK_DIR=${WORK_BASE}/${SUBDIR}
268MAKE_PROG=${MAKE_PROG:-/usr/bin/make}
516OUTPUT_DIR=${WORK_DIR}.OUTPUT
269
517
270export MAKE_PROG
271export VERBOSE
272export SRC_BASE
273export WORK_BASE
274export SUBDIR
275export SRC_DIR
276export WORK_DIR
518#
519# Make to use
520#
521MAKE_PROG=${MAKE_PROG:-/usr/bin/make}
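
Since the assignment uses a ${MAKE_PROG:-...} default, the make binary under test can
also be supplied through the environment instead of the -m option (path and script name
are illustrative):

    MAKE_PROG=/usr/obj/usr.bin/make/make sh ./test.t test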