1# See the file LICENSE for redistribution information.
2#
3# Copyright (c) 1996,2008 Oracle.  All rights reserved.
4#
5# $Id: test.tcl,v 12.54 2008/05/13 18:32:51 carol Exp $
6
source ./include.tcl

# Load DB's TCL API.
load $tcllib

# Create the scratch directory used by the tests if it does not exist.
if { [file exists $testdir] != 1 } {
	file mkdir $testdir
}

# Debugging knobs consulted by run_method and friends.
global __debug_print
global __debug_on
global __debug_test

#
# Test if utilities work to figure out the path.  Most systems
# use ., but QNX has a problem with execvp of shell scripts which
# causes it to break.
#
set stat [catch {exec ./db_printlog -?} ret]
if { [string first "exec format error" $ret] != -1 } {
	set util_path ./.libs
} else {
	set util_path .
}
set __debug_print 0

# Encryption state: encrypt/old_encrypt track whether the current run
# uses encryption; passwd is the password used by the secure tests.
set encrypt 0
set old_encrypt 0
set passwd test_passwd

# Error stream that (should!) always go to the console, even if we're
# redirecting to ALL.OUT.
set consoleerr stderr

# Word list and fixed strings used to generate keys/data items.
set dict $test_path/wordlist
set alphabet "abcdefghijklmnopqrstuvwxyz"
set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"

# Random number seed.
global rand_init
set rand_init 11302005

# Default record length for fixed record length access method(s)
set fixed_len 20

set recd_debug	0
set log_log_record_types 0
set ohandles {}

# Normally, we're not running an all-tests-in-one-env run.  This matters
# for error stream/error prefix settings in berkdb_open.
global is_envmethod
set is_envmethod 0

#
# Set when we're running a child process in a rep test.
#
global is_repchild
set is_repchild 0

# Set when we want to use replication test messaging that cannot
# share an env -- for example, because the replication processes
# are not all from the same BDB version.
global noenv_messaging
set noenv_messaging 0

# For testing locker id wrap around.
global lock_curid
global lock_maxid
set lock_curid 0
set lock_maxid 2147483647
global txn_curid
global txn_maxid
set txn_curid 2147483648
set txn_maxid 4294967295

# The variable one_test allows us to run all the permutations
# of a test with run_all or run_std.
global one_test
if { [info exists one_test] != 1 } {
	set one_test "ALL"
}

# If you call a test with the proc find_valid_methods, it will
# return the list of methods for which it will run, instead of
# actually running.
global checking_valid_methods
set checking_valid_methods 0
global valid_methods
set valid_methods { btree rbtree queue queueext recno frecno rrecno hash }

# The variable test_recopts controls whether we open envs in
# replication tests with the -recover flag.   The default is
# to test with and without the flag, but to run a meaningful
# subset of rep tests more quickly, rep_subset will randomly
# pick one or the other.
global test_recopts
set test_recopts { "-recover" "" }

# Set up any OS-specific values.
source $test_path/testutils.tcl

# Platform detection flags, derived from tcl_platform.
global tcl_platform
set is_freebsd_test [string match FreeBSD $tcl_platform(os)]
set is_hp_test [string match HP-UX $tcl_platform(os)]
set is_linux_test [string match Linux $tcl_platform(os)]
set is_qnx_test [string match QNX $tcl_platform(os)]
set is_sunos_test [string match SunOS $tcl_platform(os)]
set is_windows_test [string match Win* $tcl_platform(os)]
set is_windows9x_test [string match "Windows 95" $tcl_platform(osVersion)]
set is_je_test 0
set upgrade_be [big_endian]
global is_fat32
# NOTE(review): "file system check" queries the filesystem of a file
# literally named "check" in the current directory -- presumably to
# detect FAT32 test directories; confirm this is the intended probe.
set is_fat32 [string match FAT32 [lindex [file system check] 1]]
global EXE BAT
if { $is_windows_test == 1 } {
	set EXE ".exe"
	set BAT ".bat"
} else {
	set EXE ""
	set BAT ""
}

# This is where the test numbering and parameters now live.
source $test_path/testparams.tcl

# Try to open an encrypted database.  If it fails, this release
# doesn't support encryption, and encryption tests should be skipped.
set has_crypto 1
set stat [catch {set db [eval {berkdb_open_noerr \
    -create -btree -encryptaes test_passwd} ] } result ]
if { $stat != 0 } {
	# Make sure it's the right error for a non-crypto release.
	error_check_good non_crypto_release \
	    [expr [is_substr $result "operation not supported"] || \
	    [is_substr $result "invalid argument"]] 1
	set has_crypto 0
} else {
	# It is a crypto release.  Get rid of the db, we don't need it.
	error_check_good close_encrypted_db [$db close] 0
}
147
148# From here on out, test.tcl contains the procs that are used to
149# run all or part of the test suite.
150
# run_std --
#	Run the standard test suite, or the standard permutations of one
#	named test.  Flags (parsed via extractflags from args):
#	    A - called from run_all; skip std-only setup and summary.
#	    M - run everything except the access method tests.
#	    m - run only the access method tests.
#	    n - display the commands that would run, without running them.
#	Output is accumulated in ALL.OUT and checked with check_output.
proc run_std { { testname ALL } args } {
	global test_names
	global one_test
	global has_crypto
	global valid_methods
	source ./include.tcl

	set one_test $testname
	if { $one_test != "ALL" } {
		# Source testparams again to adjust test_names.
		source $test_path/testparams.tcl
	}

	set exflgs [eval extractflags $args]
	set args [lindex $exflgs 0]
	set flags [lindex $exflgs 1]

	set display 1
	set run 1
	set am_only 0
	set no_am 0
	set std_only 1
	set rflags {--}
	foreach f $flags {
		switch $f {
			A {
				set std_only 0
			}
			M {
				set no_am 1
				puts "run_std: all but access method tests."
			}
			m {
				set am_only 1
				puts "run_std: access method tests only."
			}
			n {
				set display 1
				set run 0
				set rflags [linsert $rflags 0 "-n"]
			}
		}
	}

	# Start a fresh ALL.OUT with a run header, but only when this is a
	# standalone run (run_all manages ALL.OUT itself).
	if { $std_only == 1 } {
		fileremove -f ALL.OUT

		set o [open ALL.OUT a]
		if { $run == 1 } {
			puts -nonewline "Test suite run started at: "
			puts [clock format [clock seconds] -format "%H:%M %D"]
			puts [berkdb version -string]

			puts -nonewline $o "Test suite run started at: "
			puts $o [clock format [clock seconds] -format "%H:%M %D"]
			puts $o [berkdb version -string]
		}
		close $o
	}

	# Each pair is a human-readable area name and the "r" subcommand
	# that runs that area.
	set test_list {
	{"environment"		"env"}
	{"archive"		"archive"}
	{"backup"		"backup"}
	{"file operations"	"fop"}
	{"locking"		"lock"}
	{"logging"		"log"}
	{"memory pool"		"memp"}
	{"transaction"		"txn"}
	{"deadlock detection"	"dead"}
	{"subdatabase"		"sdb"}
	{"byte-order"		"byte"}
	{"recno backing file"	"rsrc"}
	{"DBM interface"	"dbm"}
	{"NDBM interface"	"ndbm"}
	{"Hsearch interface"	"hsearch"}
	{"secondary index"	"sindex"}
	}

	# If this is run_std only, run each rep test for a single
	# access method.  If run_all, run for all access methods.
	if { $std_only == 1 } {
		lappend test_list {"replication"	"rep_subset"}
	} else {
		lappend test_list {"replication"	"rep_complete"}
	}

	# If release supports encryption, run security tests.
	if { $has_crypto == 1 } {
	        lappend test_list {"security"   "sec"}
	}

	if { $am_only == 0 } {
		# Each area runs in its own tclsh so a failure (or memory
		# growth) in one area cannot poison the next.
		foreach pair $test_list {
			set msg [lindex $pair 0]
			set cmd [lindex $pair 1]
			puts "Running $msg tests"
			if [catch {exec $tclsh_path << \
			    "global one_test; set one_test $one_test; \
			    source $test_path/test.tcl; r $rflags $cmd" \
			    >>& ALL.OUT } res] {
				set o [open ALL.OUT a]
				puts $o "FAIL: $cmd test: $res"
				close $o
			}
		}

		# Run recovery tests.
		#
		# XXX These too are broken into separate tclsh instantiations
		# so we don't require so much memory, but I think it's cleaner
		# and more useful to do it down inside proc r than here,
		# since "r recd" gets done a lot and needs to work.
		#
		# Note that we still wrap the test in an exec so that
		# its output goes to ALL.OUT.  run_recd will wrap each test
		# so that both error streams go to stdout (which here goes
		# to ALL.OUT);  information that run_recd wishes to print
		# to the "real" stderr, but outside the wrapping for each test,
		# such as which tests are being skipped, it can still send to
		# stderr.
		puts "Running recovery tests"
		if [catch {
		    exec $tclsh_path << \
		    "global one_test; set one_test $one_test; \
		    source $test_path/test.tcl; r $rflags recd" \
			2>@ stderr >> ALL.OUT
		    } res] {
			set o [open ALL.OUT a]
			puts $o "FAIL: recd tests: $res"
			close $o
		}

		# Run join test
		#
		# XXX
		# Broken up into separate tclsh instantiations so we don't
		# require so much memory.
		if { $one_test == "ALL" } {
			puts "Running join test"
			foreach test "join1 join2 join3 join4 join5 join6" {
				if [catch {exec $tclsh_path << \
				    "source $test_path/test.tcl; r $rflags $test" \
				    >>& ALL.OUT } res] {
					set o [open ALL.OUT a]
					puts $o "FAIL: $test test: $res"
					close $o
				}
			}
		}
	}

	if { $no_am == 0 } {
		# Access method tests.
		#
		# XXX
		# Broken up into separate tclsh instantiations so we don't
		# require so much memory.
		foreach method $valid_methods {
			puts "Running $method tests"
			foreach test $test_names(test) {
				# Display-only mode runs run_method in this
				# process, writing the commands to ALL.OUT.
				if { $run == 0 } {
					set o [open ALL.OUT a]
					run_method \
					    -$method $test $display $run $o
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    run_method \
					    -$method $test $display $run"\
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL:$test $method: $res"
						close $o
					}
				}
			}
		}
	}

	# If not actually running, no need to check for failure.
	# If running in the context of the larger 'run_all' we don't
	# check for failure here either.
	if { $run == 0 || $std_only == 0 } {
		return
	}

	set failed [check_output ALL.OUT]

	set o [open ALL.OUT a]
	if { $failed == 0 } {
		puts "Regression Tests Succeeded"
		puts $o "Regression Tests Succeeded"
	} else {
		puts "Regression Tests Failed"
		puts "Check UNEXPECTED OUTPUT lines."
		puts "Review ALL.OUT.x for details."
		puts $o "Regression Tests Failed"
	}

	puts -nonewline "Test suite run completed at: "
	puts [clock format [clock seconds] -format "%H:%M %D"]
	puts -nonewline $o "Test suite run completed at: "
	puts $o [clock format [clock seconds] -format "%H:%M %D"]
	close $o
}
361
# check_output --
#	Scan a test-run output file for unexpected lines.
#	Every line must match one of the acceptable patterns below;
#	any line that does not is echoed to stdout with an
#	"UNEXPECTED OUTPUT: " prefix.  Returns 0 if the file is clean,
#	1 if any unexpected line was seen.
proc check_output { file } {
	# These are all the acceptable patterns.  The expression uses
	# expanded syntax ((?x)), so literal whitespace/newlines inside
	# the braces are ignored by the regexp engine.
	#
	# The first alternative was previously ^[:space:]*$ -- a bracket
	# expression matching the literal characters ":space", not
	# whitespace -- so lines of blanks/tabs were wrongly flagged.
	# It is now the POSIX class [[:space:]].
	set pattern {(?x)
		^[[:space:]]*$|
		.*?wrap\.tcl.*|
		.*?dbscript\.tcl.*|
		.*?ddscript\.tcl.*|
		.*?mpoolscript\.tcl.*|
		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)$|
		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\sCrashing$|
		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s[p|P]rocesses\srunning:.*|
		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s5\sprocesses\srunning.*|
		^\d:\sPut\s\d*\sstrings\srandom\soffsets.*|
		^100.*|
		^eval\s.*|
		^exec\s.*|
		^fileops:\s.*|
		^jointest.*$|
		^r\sarchive\s*|
		^r\sbackup\s*|
		^r\sdbm\s*|
		^r\shsearch\s*|
		^r\sndbm\s*|
		^r\srpc\s*|
		^run_recd:\s.*|
		^run_reptest\s.*|
		^run_rpcmethod:\s.*|
		^run_secenv:\s.*|
		^All\sprocesses\shave\sexited.$|
		^Backuptest\s.*|
		^Beginning\scycle\s\d$|
		^Byteorder:.*|
		^Child\sruns\scomplete\.\s\sParent\smodifies\sdata\.$|
		^Deadlock\sdetector:\s\d*\sCheckpoint\sdaemon\s\d*$|
		^Ending\srecord.*|
		^Environment\s.*?specified;\s\sskipping\.$|
		^Executing\srecord\s.*|
		^Join\stest:\.*|
		^Method:\s.*|
		^Repl:\stest\d\d\d:.*|
		^Repl:\ssdb\d\d\d:.*|
		^Running\stest\ssdb.*|
		^Running\stest\stest.*|
		^Script\swatcher\sprocess\s.*|
		^Secondary\sindex\sjoin\s.*|
		^\sBerkeley\sDB\s.*|
		^Test\ssuite\srun\s.*|
		^Unlinking\slog:\serror\smessage\sOK$|
		^Verifying\s.*|
		^\t*\.\.\.dbc->get.*$|
		^\t*\.\.\.dbc->put.*$|
		^\t*\.\.\.key\s\d*$|
		^\t*\.\.\.Skipping\sdbc.*|
		^\t*and\s\d*\sduplicate\sduplicates\.$|
		^\t*About\sto\srun\srecovery\s.*complete$|
		^\t*Add\sa\sthird\sversion\s.*|
		^\t*Archive[:\.].*|
		^\t*Backuptest.*|
		^\t*Bigfile[0-9][0-9][0-9].*|
		^\t*Building\s.*|
		^\t*closing\ssecondaries\.$|
		^\t*Command\sexecuted\sand\s.*$|
		^\t*DBM.*|
		^\t*[d|D]ead[0-9][0-9][0-9].*|
		^\t*Dump\/load\sof.*|
		^\t*[e|E]nv[0-9][0-9][0-9].*|
		^\t*Executing\scommand$|
		^\t*Executing\stxn_.*|
		^\t*File\srecd005\.\d\.db\sexecuted\sand\saborted\.$|
		^\t*File\srecd005\.\d\.db\sexecuted\sand\scommitted\.$|
		^\t*[f|F]op[0-9][0-9][0-9].*|
		^\t*HSEARCH.*|
		^\t*Initial\sCheckpoint$|
		^\t*Iteration\s\d*:\sCheckpointing\.$|
		^\t*Joining:\s.*|
		^\t*Kid[1|2]\sabort\.\.\.complete$|
		^\t*Kid[1|2]\scommit\.\.\.complete$|
		^\t*[l|L]ock[0-9][0-9][0-9].*|
		^\t*[l|L]og[0-9][0-9][0-9].*|
		^\t*[m|M]emp[0-9][0-9][0-9].*|
		^\t*[m|M]utex[0-9][0-9][0-9].*|
		^\t*NDBM.*|
		^\t*opening\ssecondaries\.$|
		^\t*op_recover_rec:\sRunning\srecovery.*|
		^\t*[r|R]ecd[0-9][0-9][0-9].*|
		^\t*[r|R]ep[0-9][0-9][0-9].*|
		^\t*[r|R]ep_test.*|
		^\t*[r|R]pc[0-9][0-9][0-9].*|
		^\t*[r|R]src[0-9][0-9][0-9].*|
		^\t*Recover\sfrom\sfirst\sdatabase$|
		^\t*Recover\sfrom\ssecond\sdatabase$|
		^\t*Remove\ssecond\sdb$|
		^\t*Rep_verify.*|
		^\t*Run_rpcmethod.*|
		^\t*Running\srecovery\son\s.*|
		^\t*[s|S]ec[0-9][0-9][0-9].*|
		^\t*[s|S]i[0-9][0-9][0-9].*|
		^\t*[s|S]ijoin.*|
		^\t*sdb[0-9][0-9][0-9].*|
		^\t*Skipping\s.*|
		^\t*Subdb[0-9][0-9][0-9].*|
		^\t*Subdbtest[0-9][0-9][0-9].*|
		^\t*Syncing$|
		^\t*[t|T]est[0-9][0-9][0-9].*|
		^\t*[t|T]xn[0-9][0-9][0-9].*|
		^\t*Txnscript.*|
		^\t*Using\s.*?\senvironment\.$|
		^\t*Verification\sof.*|
		^\t*with\stransactions$}

	# NOTE(review): classes of the form [p|P] also match a literal
	# '|'; probably intended as [pP].  Left as-is to avoid changing
	# which lines are accepted.
	set failed 0
	set f [open $file r]
	while { [gets $f line] >= 0 } {
		if { [regexp $pattern $line] == 0 } {
			puts -nonewline "UNEXPECTED OUTPUT: "
			puts $line
			set failed 1
		}
	}
	close $f
	return $failed
}
484
# r --
#	Dispatch a test area name (env, lock, rep, sdb, a method name,
#	etc.) to the code that runs it.  The optional second argument is
#	a starting test for the rep subcommands.  Flag n (via
#	extractflags) displays commands without running them.  Errors
#	are re-raised with a FAIL prefix unless one is already present.
proc r { args } {
	global test_names
	global has_crypto
	global rand_init
	global one_test
	global test_recopts
	global checking_valid_methods

	source ./include.tcl

	set exflgs [eval extractflags $args]
	set args [lindex $exflgs 0]
	set flags [lindex $exflgs 1]

	set display 1
	set run 1
	set saveflags "--"
	foreach f $flags {
		switch $f {
			n {
				set display 1
				set run 0
				set saveflags "-n $saveflags"
			}
		}
	}

	if {[catch {
		set sub [ lindex $args 0 ]
		set starttest [lindex $args 1]
		# NOTE: do not place comments between the arms of the
		# switch below.  Tcl parses a braced switch body as a flat
		# pattern/body list, so comment words there are silently
		# treated as extra patterns and bodies.
		switch $sub {
			bigfile -
			dead -
			env -
			lock -
			log -
			memp -
			rsrc -
			sdbtest -
			txn {
				if { $display } {
					run_subsystem $sub 1 0
				}
				if { $run } {
					run_subsystem $sub
				}
			}
			byte {
				if { $one_test == "ALL" } {
					run_test byteorder $display $run
				}
			}
			archive -
			backup -
			dbm -
			hsearch -
			ndbm -
			shelltest {
				if { $one_test == "ALL" } {
					if { $display } { puts "eval $sub" }
					if { $run } {
						check_handles
						eval $sub
					}
				}
			}
			compact -
			elect -
			inmemdb -
			init -
			fop {
				foreach test $test_names($sub) {
					eval run_test $test $display $run
				}
			}
			join {
				eval r $saveflags join1
				eval r $saveflags join2
				eval r $saveflags join3
				eval r $saveflags join4
				eval r $saveflags join5
				eval r $saveflags join6
			}
			join1 {
				if { $display } { puts "eval jointest" }
				if { $run } {
					check_handles
					eval jointest
				}
			}
			joinbench {
				puts "[timestamp]"
				eval r $saveflags join1
				eval r $saveflags join2
				puts "[timestamp]"
			}
			join2 {
				if { $display } { puts "eval jointest 512" }
				if { $run } {
					check_handles
					eval jointest 512
				}
			}
			join3 {
				if { $display } {
					puts "eval jointest 8192 0 -join_item"
				}
				if { $run } {
					check_handles
					eval jointest 8192 0 -join_item
				}
			}
			join4 {
				if { $display } { puts "eval jointest 8192 2" }
				if { $run } {
					check_handles
					eval jointest 8192 2
				}
			}
			join5 {
				if { $display } { puts "eval jointest 8192 3" }
				if { $run } {
					check_handles
					eval jointest 8192 3
				}
			}
			join6 {
				if { $display } { puts "eval jointest 512 3" }
				if { $run } {
					check_handles
					eval jointest 512 3
				}
			}
			recd {
				check_handles
				run_recds $run $display [lrange $args 1 end]
			}
			rep {
				r rep_subset $starttest
			}
			rep_subset {
				# To run a subset of the complete rep tests,
				# use rep_subset, which randomly picks an
				# access type to use, and randomly picks
				# whether to open envs with the -recover
				# flag.  (This comment formerly sat between
				# switch arms, where Tcl would have parsed
				# its words as bogus patterns/bodies.)
				berkdb srand $rand_init
				set tindex [lsearch $test_names(rep) $starttest]
				if { $tindex == -1 } {
					set tindex 0
				}
				set rlist [lrange $test_names(rep) $tindex end]
				foreach test $rlist {
					set random_recopt \
					    [berkdb random_int 0 1]
					if { $random_recopt == 1 } {
						set test_recopts "-recover"
					} else {
						set test_recopts {""}
					}

					set method_list \
					    [find_valid_methods $test]
					set list_length \
					    [expr [llength $method_list] - 1]
					set method_index \
					    [berkdb random_int 0 $list_length]
					set rand_method \
					    [lindex $method_list $method_index]

					if { $display } {
						puts "eval $test \
						    $rand_method; verify_dir \
						    $testdir \"\" 1"
					}
					if { $run } {
				 		check_handles
						eval $test $rand_method
						verify_dir $testdir "" 1
					}
				}
				# Restore the default: test both with and
				# without -recover.
				set test_recopts { "-recover" "" }
			}
			rep_complete {
				set tindex [lsearch $test_names(rep) $starttest]
				if { $tindex == -1 } {
					set tindex 0
				}
				set rlist [lrange $test_names(rep) $tindex end]
				foreach test $rlist {
					run_test $test $display $run
				}
			}
			repmethod {
				# We seed the random number generator here
				# instead of in run_repmethod so that we
				# aren't always reusing the first few
				# responses from random_int.
				#
				berkdb srand $rand_init
				foreach sub { test sdb } {
					foreach test $test_names($sub) {
						eval run_test run_repmethod \
						    $display $run $test
					}
				}
			}
			rpc {
				if { $one_test == "ALL" } {
					if { $display } { puts "r $sub" }
					global BAT EXE rpc_svc svc_list
					global rpc_svc svc_list is_je_test
					set old_rpc_src $rpc_svc
					foreach rpc_svc $svc_list {
						if { $rpc_svc == "berkeley_dbje_svc" } {
							set old_util_path $util_path
							set util_path $je_root/dist
							set is_je_test 1
						}

						if { !$run || \
				      ![file exist $util_path/$rpc_svc$BAT] || \
				      ![file exist $util_path/$rpc_svc$EXE] } {
							continue
						}

						run_subsystem rpc
						if { [catch {run_rpcmethod -txn} ret] != 0 } {
							puts $ret
						}

						if { $is_je_test } {
							check_handles
							eval run_rpcmethod -btree
							verify_dir $testdir "" 1
						} else {
							run_test run_rpcmethod $display $run
						}

						if { $is_je_test } {
							set util_path $old_util_path
							set is_je_test 0
						}

					}
					set rpc_svc $old_rpc_src
				}
			}
			sec {
				# Skip secure mode tests if release
				# does not support encryption.
				if { $has_crypto == 0 } {
					return
				}
				if { $display } {
					run_subsystem $sub 1 0
				}
				if { $run } {
					run_subsystem $sub 0 1
				}
			}
			secmethod {
				# Skip secure mode tests if release
				# does not support encryption.
				if { $has_crypto == 0 } {
					return
				}
				foreach test $test_names(test) {
					eval run_test run_secmethod \
					    $display $run $test
					eval run_test run_secenv \
					    $display $run $test
				}
			}
			sdb {
				if { $one_test == "ALL" } {
					if { $display } {
						run_subsystem sdbtest 1 0
					}
					if { $run } {
						run_subsystem sdbtest 0 1
					}
				}
				foreach test $test_names(sdb) {
					eval run_test $test $display $run
				}
			}
			sindex {
				if { $one_test == "ALL" } {
					if { $display } {
						sindex 1 0
						sijoin 1 0
					}
					if { $run } {
						sindex 0 1
						sijoin 0 1
					}
				}
			}
			btree -
			rbtree -
			hash -
			iqueue -
			iqueueext -
			queue -
			queueext -
			recno -
			frecno -
			rrecno {
				foreach test $test_names(test) {
					eval run_method [lindex $args 0] $test \
					    $display $run [lrange $args 1 end]
				}
			}

			default {
				error \
				    "FAIL:[timestamp] r: $args: unknown command"
			}
		}
		flush stdout
		flush stderr
	} res] != 0} {
		global errorInfo;
		# Re-raise with only the first line of the stack trace,
		# adding a FAIL prefix unless one is already present.
		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp] r: $args: $theError"
		} else {
			error $theError;
		}
	}
}
817
# run_subsystem --
#	Run (and/or display) every test registered for one subsystem.
#	"display" echoes each test's name; "run" actually executes it.
#	Individual test failures are reported but do not stop the loop.
proc run_subsystem { sub { display 0 } { run 1} } {
	global test_names

	# Nothing to do if testparams.tcl registered no tests for $sub.
	if { ![info exists test_names($sub)] } {
		puts stderr "Subsystem $sub has no tests specified in\
		    testparams.tcl; skipping."
		return
	}
	foreach test $test_names($sub) {
		if { $display } {
			puts "eval $test"
		}
		if { !$run } {
			continue
		}
		check_handles
		if {[catch {eval $test} ret] != 0 } {
			puts "FAIL: run_subsystem: $sub $test: \
			    $ret"
		}
	}
}
839
# run_test --
#	Run (and/or display) one test under every valid access method,
#	verifying the databases left in $testdir after each run.
proc run_test { test {display 0} {run 1} args } {
	source ./include.tcl
	global valid_methods

	foreach am $valid_methods {
		if { $display } {
			puts "eval $test -$am $args; verify_dir $testdir \"\" 1"
		}
		if { !$run } {
			continue
		}
		check_handles
		eval $test -$am $args
		verify_dir $testdir "" 1
	}
}
855
# run_method --
#	Run (or just display) a single test under one access method,
#	writing progress to $outfile.  The test's standard parameters
#	come from parms($test); extra args are appended.  On error the
#	first line of the stack trace is re-raised with a FAIL prefix
#	unless one is already present.
proc run_method { method test {display 0} {run 1} \
    { outfile stdout } args } {
	global __debug_on
	global __debug_print
	global __debug_test
	global test_names
	global parms
	source ./include.tcl

	if {[catch {
		if { $display } {
			puts -nonewline $outfile "eval $test $method"
			puts -nonewline $outfile " $parms($test) $args"
			puts $outfile " ; verify_dir $testdir \"\" 1"
		}
		if { $run } {
			check_handles $outfile
			puts $outfile "[timestamp]"
			eval $test $method $parms($test) $args
			if { $__debug_print != 0 } {
				puts $outfile ""
			}
			# verify all databases the test leaves behind
			verify_dir $testdir "" 1
			if { $__debug_on != 0 } {
				debug $__debug_test
			}
		}
		flush stdout
		flush stderr
	} res] != 0} {
		global errorInfo;

		# Keep only the first line of the stack trace when
		# re-raising.
		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_method: $method $test: $theError"
		} else {
			error $theError;
		}
	}
}
899
# run_rpcmethod --
#	Start an RPC server and run either the txn tests (when $method
#	contains "txn") or every test in rpc_tests($rpc_svc) against an
#	RPC-backed environment.  On a test failure the server is killed
#	and restarted so subsequent tests get a clean slate.
proc run_rpcmethod { method {largs ""} } {
	global __debug_on
	global __debug_print
	global __debug_test
	global rpc_tests
	global parms
	global is_envmethod
	global rpc_svc
	source ./include.tcl

	puts "run_rpcmethod: $method $largs using $rpc_svc"

	set save_largs $largs
	# Start a fresh server and clean both remote and local test dirs.
	set dpid [rpc_server_start]
	puts "\tRun_rpcmethod.a: started server, pid $dpid"
	remote_cleanup $rpc_server $rpc_testdir $testdir

	set home [file tail $rpc_testdir]

	set is_envmethod 1
	set use_txn 0
	if { [string first "txn" $method] != -1 } {
		set use_txn 1
	}
	if { $use_txn == 1 } {
		set ntxns 32
		set i 1
		check_handles
		remote_cleanup $rpc_server $rpc_testdir $testdir
		set env [eval {berkdb_env -create -mode 0644 -home $home \
		    -server $rpc_server -client_timeout 10000} -txn]
		error_check_good env_open [is_valid_env $env] TRUE

		set stat [catch {eval txn001_suba $ntxns $env} res]
		if { $stat == 0 } {
			set stat [catch {eval txn001_subb $ntxns $env} res]
		}
		# NOTE(review): this overwrites the status of the txn001
		# runs above, so a txn001 failure goes unreported here --
		# confirm that is intentional.
		set stat [catch {eval txn003} res]
		error_check_good envclose [$env close] 0
	} else {
		foreach test $rpc_tests($rpc_svc) {
			set stat [catch {
				check_handles
				remote_cleanup $rpc_server $rpc_testdir $testdir
				#
				# Set server cachesize to 128Mb.  Otherwise
				# some tests won't fit (like test084 -btree).
				#
				set env [eval {berkdb_env -create -mode 0644 \
				    -home $home -server $rpc_server \
				    -client_timeout 10000 \
				    -cachesize {0 134217728 1}}]
				error_check_good env_open \
				    [is_valid_env $env] TRUE
				set largs $save_largs
				append largs " -env $env "

				puts "[timestamp]"
				puts "Running test $test with RPC service $rpc_svc"
				puts "eval $test $method $parms($test) $largs"
				eval $test $method $parms($test) $largs
				if { $__debug_print != 0 } {
					puts ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
				flush stdout
				flush stderr
				error_check_good envclose [$env close] 0
				set env ""
			} res]

			if { $stat != 0} {
				global errorInfo;

				puts "$res"

				set fnl [string first "\n" $errorInfo]
				set theError [string range $errorInfo 0 [expr $fnl - 1]]
				if {[string first FAIL $errorInfo] == -1} {
					puts "FAIL:[timestamp]\
					    run_rpcmethod: $method $test: $errorInfo"
				} else {
					puts $theError;
				}

				# Discard the (possibly broken) env handle
				# and restart the server for the next test.
				catch { $env close } ignore
				set env ""
				tclkill $dpid
				set dpid [rpc_server_start]
			}
		}
	}
	set is_envmethod 0
	tclkill $dpid
}
997
# run_rpcnoserver --
#	Run RPC tests against an already-running RPC server ($rpc_server
#	from include.tcl).  When $method contains "txn", run the txn001
#	subtests; otherwise iterate over $test_names.  Errors are
#	re-raised with a FAIL prefix unless one is already present.
proc run_rpcnoserver { method {largs ""} } {
	global __debug_on
	global __debug_print
	global __debug_test
	global test_names
	global parms
	global is_envmethod
	source ./include.tcl

	puts "run_rpcnoserver: $method $largs"

	set save_largs $largs
	remote_cleanup $rpc_server $rpc_testdir $testdir
	set home [file tail $rpc_testdir]

	set is_envmethod 1
	set use_txn 0
	if { [string first "txn" $method] != -1 } {
		set use_txn 1
	}
	if { $use_txn == 1 } {
		set ntxns 32
		check_handles
		remote_cleanup $rpc_server $rpc_testdir $testdir
		set env [eval {berkdb_env -create -mode 0644 -home $home \
		    -server $rpc_server -client_timeout 10000} -txn]
		error_check_good env_open [is_valid_env $env] TRUE

		set stat [catch {eval txn001_suba $ntxns $env} res]
		if { $stat == 0 } {
			set stat [catch {eval txn001_subb $ntxns $env} res]
		}
		error_check_good envclose [$env close] 0
	} else {
		set stat [catch {
			foreach test $test_names {
				check_handles
				if { [info exists parms($test)] != 1 } {
					puts stderr "$test disabled in \
					    testparams.tcl; skipping."
					continue
				}
				remote_cleanup $rpc_server $rpc_testdir $testdir
				#
				# Set server cachesize to 1Mb.  Otherwise some
				# tests won't fit (like test084 -btree).
				#
				set env [eval {berkdb_env -create -mode 0644 \
				    -home $home -server $rpc_server \
				    -client_timeout 10000 \
				    -cachesize {0 1048576 1} }]
				error_check_good env_open \
				    [is_valid_env $env] TRUE
				append largs " -env $env "

				puts "[timestamp]"
				eval $test $method $parms($test) $largs
				if { $__debug_print != 0 } {
					puts ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
				flush stdout
				flush stderr
				set largs $save_largs
				error_check_good envclose [$env close] 0
			}
		} res]
	}

	# Restore the flag on success as well as failure.  The original
	# reset it only inside the error branch, after the error had
	# already been re-raised, so it never ran at all.
	set is_envmethod 0

	if { $stat != 0} {
		global errorInfo;

		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			# The original message interpolated $i, which is
			# undefined on the non-txn path and would have made
			# the error handler itself fail.
			error "FAIL:[timestamp]\
			    run_rpcnoserver: $method: $theError"
		} else {
			error $theError;
		}
	}
}
1084
1085#
1086# Run method tests in secure mode.
1087#
#
# Run method tests in secure mode: identical to run_method, but with
# AES encryption enabled via the suite-wide password.
#
proc run_secmethod { method test {display 0} {run 1} \
    { outfile stdout } args } {
	global passwd
	global has_crypto

	# Skip secure mode tests if release does not support encryption.
	if { $has_crypto == 0 } {
		return
	}

	# Forward to run_method with the encryption flag appended.
	set secargs $args
	append secargs " -encryptaes $passwd "
	eval run_method $method $test $display $run $outfile $secargs
}
1102
1103#
1104# Run method tests each in its own, new secure environment.
1105#
#
# Run method tests each in its own, new secure environment.
#
# run_secenv --
#	Create a fresh AES-encrypted environment, then run the test
#	twice in it: once against a clear database and once against an
#	encrypted database.  The environment is closed and removed
#	afterwards.  Errors are re-raised with a FAIL prefix unless one
#	is already present.
proc run_secenv { method test {largs ""} } {
	global __debug_on
	global __debug_print
	global __debug_test
	global is_envmethod
	global has_crypto
	global test_names
	global parms
	global passwd
	source ./include.tcl

	# Skip secure mode tests if release does not support encryption.
	if { $has_crypto == 0 } {
		return
	}

	# Skip disabled tests before touching the environment.  The
	# original did this check inside the catch body with "continue",
	# which is illegal outside a loop: catch turned it into a
	# non-zero status, the error path then read a stale errorInfo,
	# and the already-opened env leaked.
	if { [info exists parms($test)] != 1 } {
		puts stderr "$test disabled in\
		    testparams.tcl; skipping."
		return
	}

	puts "run_secenv: $method $test $largs"

	set save_largs $largs
	env_cleanup $testdir
	set is_envmethod 1
	set stat [catch {
		check_handles
		set env [eval {berkdb_env -create -mode 0644 -home $testdir \
		    -encryptaes $passwd -cachesize {0 4194304 1}}]
		error_check_good env_open [is_valid_env $env] TRUE
		append largs " -env $env "

		puts "[timestamp]"

		#
		# Run each test multiple times in the secure env.
		# Once with a secure env + clear database
		# Once with a secure env + secure database
		#
		eval $test $method $parms($test) $largs
		append largs " -encrypt "
		eval $test $method $parms($test) $largs

		if { $__debug_print != 0 } {
			puts ""
		}
		if { $__debug_on != 0 } {
			debug $__debug_test
		}
		flush stdout
		flush stderr
		set largs $save_largs
		error_check_good envclose [$env close] 0
		error_check_good envremove [berkdb envremove \
		    -home $testdir -encryptaes $passwd] 0
	} res]

	# Restore the flag on success as well as failure.  The original
	# reset it only after re-raising the error, so it never ran.
	set is_envmethod 0

	if { $stat != 0} {
		global errorInfo;

		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_secenv: $method $test: $theError"
		} else {
			error $theError;
		}
	}
}
1178
1179#
1180# Run replication method tests in master and client env.
1181#
proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
    {do_sec 0} {do_oob 0} {largs "" } } {
	# Run one replication test in a master env plus $nclients client
	# envs, dropping $droppct% of the messages, optionally with
	# encryption (do_sec), out-of-order message delivery (do_oob) and
	# delete verification (do_del).  $largs is passed through to the
	# individual test.  Raises an error prefixed with FAIL on failure.
	source ./include.tcl
	# Replication tests are not run on Win 9x platforms.
	if { $is_windows9x_test == 1 } {
		puts "Skipping replication test on Win 9x platform."
		return
	}

	global __debug_on
	global __debug_print
	global __debug_test
	global is_envmethod
	global parms
	global passwd
	global has_crypto

	puts "run_reptest \
	    $method $test $droppct $nclients $do_del $do_sec $do_oob $largs"

	env_cleanup $testdir
	set is_envmethod 1
	set stat [catch {
		# Security is honored only when this build supports crypto.
		if { $do_sec && $has_crypto } {
			set envargs "-encryptaes $passwd"
			append largs " -encrypt "
		} else {
			set envargs ""
		}
		check_handles
		#
		# This will set up the master and client envs
		# and will return us the args to pass to the
		# test.

		set largs [repl_envsetup \
		    $envargs $largs $test $nclients $droppct $do_oob]

		puts "[timestamp]"
		if { [info exists parms($test)] != 1 } {
			puts stderr "$test disabled in\
			    testparams.tcl; skipping."
			# NOTE(review): this "continue" is not inside a
			# loop; it aborts the surrounding catch body with a
			# non-zero return code, so a disabled test falls
			# into the error branch below -- confirm intended.
			continue
		}

		# Describe the randomly/explicitly chosen options.
		puts -nonewline \
		    "Repl: $test: dropping $droppct%, $nclients clients "
		if { $do_del } {
			puts -nonewline " with delete verification;"
		} else {
			puts -nonewline " no delete verification;"
		}
		if { $do_sec } {
			puts -nonewline " with security;"
		} else {
			puts -nonewline " no security;"
		}
		if { $do_oob } {
			puts -nonewline " with out-of-order msgs;"
		} else {
			puts -nonewline " no out-of-order msgs;"
		}
		puts ""

		eval $test $method $parms($test) $largs

		if { $__debug_print != 0 } {
			puts ""
		}
		if { $__debug_on != 0 } {
			debug $__debug_test
		}
		flush stdout
		flush stderr
		# Post-test replication helpers: drain queues, run the
		# verification passes, then close master and client envs.
		repl_envprocq $test $nclients $do_oob
		repl_envver0 $test $method $nclients
		if { $do_del } {
			repl_verdel $test $method $nclients
		}
		repl_envclose $test $envargs
	} res]
	if { $stat != 0} {
		global errorInfo;

		# Re-raise with a FAIL prefix unless one is already present.
		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_reptest: $method $test: $theError"
		} else {
			error $theError;
		}
	}
	set is_envmethod 0
}
1276
1277#
1278# Run replication method tests in master and client env.
1279#
proc run_repmethod { method test {numcl 0} {display 0} {run 1} \
    {outfile stdout} {largs ""} } {
	# Run one replication test ($test) for $method, picking the
	# message-drop percentage and the security / out-of-order /
	# delete-verification options at random.  The number of clients is
	# also random unless forced via $numcl.
	#
	# display == 1	print the equivalent run_reptest command to $outfile.
	# run == 1	actually execute the test via run_reptest.
	source ./include.tcl
	if { $is_windows9x_test == 1 } {
		puts "Skipping replication test on Win 9x platform."
		return
	}

	global __debug_on
	global __debug_print
	global __debug_test
	global is_envmethod
	global test_names
	global parms
	global has_crypto
	global passwd

	env_cleanup $testdir

	# Use weighted lists to choose the drop percentage and number of
	# clients because we really don't want to evenly-weight all
	# values.  Favor smaller numbers but test more clients (and higher
	# drop rates) occasionally.
	set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
	set drop_len [expr {[llength $drop_list] - 1}]
	set client_list { 1 1 2 1 1 1 2 2 3 1 }
	set cl_len [expr {[llength $client_list] - 1}]

	if { $numcl == 0 } {
		set clindex [berkdb random_int 0 $cl_len]
		set nclients [lindex $client_list $clindex]
	} else {
		set nclients $numcl
	}
	set drindex [berkdb random_int 0 $drop_len]
	set droppct [lindex $drop_list $drindex]

	# Do not drop messages on Windows.  Since we can't set
	# re-request times with less than millisecond precision,
	# dropping messages will cause test failures.
	if { $is_windows_test == 1 } {
		set droppct 0
	}

	# Randomly enable security, out-of-order delivery, and delete
	# verification for this run.
	set do_sec [berkdb random_int 0 1]
	set do_oob [berkdb random_int 0 1]
	set do_del [berkdb random_int 0 1]

	if { $display == 1 } {
		puts $outfile "eval run_reptest $method $test $droppct \
		    $nclients $do_del $do_sec $do_oob $largs"
	}
	if { $run == 1 } {
		run_reptest $method $test $droppct $nclients $do_del \
		    $do_sec $do_oob $largs
	}
}
1337
1338#
1339# Run method tests, each in its own, new environment.  (As opposed to
1340# run_envmethod1 which runs all the tests in a single environment.)
1341#
proc run_envmethod { method test {display 0} {run 1} {outfile stdout} \
    { largs "" } } {
	# Run a single test in its own, new transactional environment.
	# (As opposed to run_envmethod1 which runs all the tests in a
	# single environment.)
	#
	# display == 1	print the equivalent command to $outfile.
	# run == 1	actually run the test.
	# largs		extra args; -shm, -thread and -alloc are recognized
	#		and translated into environment flags.
	global __debug_on
	global __debug_print
	global __debug_test
	global is_envmethod
	global test_names
	global parms
	source ./include.tcl

	set save_largs $largs
	set envargs ""

	# Enlarge the logging region by default - sdb004 needs this because
	# it uses very long subdb names, and the names are stored in the
	# env region.
	set logargs " -log_regionmax 2057152 "

	# Enlarge the cache by default - some compaction tests need it.
	set cacheargs "-cachesize {0 4194304 1}"
	env_cleanup $testdir

	if { $display == 1 } {
		puts $outfile "eval run_envmethod $method \
		    $test 0 1 stdout $largs"
	}

	# To run a normal test using system memory, call run_envmethod
	# with the flag -shm.
	set sindex [lsearch -exact $largs "-shm"]
	if { $sindex >= 0 } {
		if { [mem_chk " -system_mem -shm_key 1 "] == 1 } {
			# Bug fix: this was "break", which is illegal at
			# proc level (outside any loop) and would itself
			# raise an error.  If system shared memory is not
			# usable, simply skip the test.
			return
		} else {
			append envargs " -system_mem -shm_key 1 "
			set largs [lreplace $largs $sindex $sindex]
		}
	}

	# Test for -thread option and pass to berkdb_env open.  Leave in
	# $largs because -thread can also be passed to an individual
	# test as an arg.  Double the number of lockers because a threaded
	# env requires more than an ordinary env.
	if { [lsearch -exact $largs "-thread"] != -1 } {
		append envargs " -thread -lock_max_lockers 2000 "
	}

	# Test for -alloc option and pass to berkdb_env open only.
	# Remove from largs because -alloc is not an allowed test arg.
	set aindex [lsearch -exact $largs "-alloc"]
	if { $aindex >= 0 } {
		append envargs " -alloc "
		set largs [lreplace $largs $aindex $aindex]
	}

	# We raise the number of locks and objects - there are a few
	# compaction tests that require a large number.
	set lockargs " -lock_max_locks 40000 -lock_max_objects 20000 "

	if { $run == 1 } {
		# Bug fix: check for a disabled test before creating the
		# env.  This check formerly sat inside the catch body and
		# used "continue", which escaped the catch with a non-zero
		# return code and mis-reported the skip as a failure.
		if { [info exists parms($test)] != 1 } {
			puts stderr "$test disabled in\
			    testparams.tcl; skipping."
			return
		}
		set is_envmethod 1
		set stat [catch {
			check_handles
			set env [eval {berkdb_env -create -txn -mode 0644 \
			    -home $testdir} $logargs $cacheargs $lockargs $envargs]
			error_check_good env_open [is_valid_env $env] TRUE
			append largs " -env $env "

			puts "[timestamp]"
			eval $test $method $parms($test) $largs

			if { $__debug_print != 0 } {
				puts ""
			}
			if { $__debug_on != 0 } {
				debug $__debug_test
			}
			flush stdout
			flush stderr
			set largs $save_largs
			error_check_good envclose [$env close] 0
			error_check_good envremove [berkdb envremove \
			    -home $testdir] 0
		} res]
		if { $stat != 0} {
			global errorInfo;

			# Re-raise with a FAIL prefix unless one is present.
			set fnl [string first "\n" $errorInfo]
			set theError [string range $errorInfo 0 [expr {$fnl - 1}]]
			if {[string first FAIL $errorInfo] == -1} {
				error "FAIL:[timestamp]\
				    run_envmethod: $method $test: $theError"
			} else {
				error $theError;
			}
		}
		set is_envmethod 0
	}
}
1446
proc run_recd { method test {run 1} {display 0} args } {
	# Run a single recovery test for the given access method.  The test
	# itself executes in a child tclsh process (see the exec below) so
	# each recovery test starts from a fresh interpreter; extra args
	# are forwarded to the test.
	global __debug_on
	global __debug_print
	global __debug_test
	global parms
	global test_names
	global log_log_record_types
	global gen_upgrade_log
	global upgrade_be
	global upgrade_dir
	global upgrade_method
	global upgrade_name
	source ./include.tcl

	if { $run == 1 } {
		puts "run_recd: $method $test $parms($test) $args"
	}
	if {[catch {
		if { $display } {
			puts "eval $test $method $parms($test) $args"
		}
		if { $run } {
			check_handles
			# Record the method/name so the child process (and
			# the upgrade-log machinery) see the same values.
			set upgrade_method $method
			set upgrade_name $test
			puts "[timestamp]"
			# By redirecting stdout to stdout, we make exec
			# print output rather than simply returning it.
			# By redirecting stderr to stdout too, we make
			# sure everything winds up in the ALL.OUT file.
			set ret [catch { exec $tclsh_path << \
			    "source $test_path/test.tcl; \
			    set log_log_record_types $log_log_record_types;\
			    set gen_upgrade_log $gen_upgrade_log;\
			    set upgrade_be $upgrade_be; \
			    set upgrade_dir $upgrade_dir; \
			    set upgrade_method $upgrade_method; \
			    set upgrade_name $upgrade_name; \
			    eval $test $method $parms($test) $args" \
			    >&@ stdout
			} res]

			# Don't die if the test failed;  we want
			# to just proceed.
			if { $ret != 0 } {
				puts "FAIL:[timestamp] $res"
			}

			if { $__debug_print != 0 } {
				puts ""
			}
			if { $__debug_on != 0 } {
				debug $__debug_test
			}
			flush stdout
			flush stderr
		}
	} res] != 0} {
		global errorInfo;

		# Re-raise with a FAIL prefix unless one is already present.
		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_recd: $method: $theError"
		} else {
			error $theError;
		}
	}
}
1517
proc run_recds { {run 1} {display 0} args } {
	# Run every recovery test for every access method, tracking which
	# log record types were exercised along the way.
	source ./include.tcl
	global log_log_record_types
	global test_names
	global gen_upgrade_log
	global encrypt
	global valid_methods

	# Turn on log-record-type tracking for the duration of the run.
	set log_log_record_types 1
	logtrack_init

	# A small subset of the recovery tests also runs with log file
	# zeroing (btree only; see below).
	set zero_log_tests \
	    {recd001 recd002 recd003 recd004 recd005 recd006 recd007}

	foreach am $valid_methods {
		check_handles
		foreach tname $test_names(recd) {
			# When generating upgrade logs: crypto runs do only
			# recd017, and non-crypto runs skip recd017.
			if { $gen_upgrade_log == 1 } {
				if { $tname == "recd017" && $encrypt == 0 } {
					puts "Skipping recd017 for non-crypto run."
					continue
				}
				if { $tname != "recd017" && $encrypt == 1 } {
					puts "Skipping $tname for crypto run."
					continue
				}
			}

			# Report, but do not abort on, individual failures.
			if { [catch {eval run_recd $am $tname $run \
			    $display $args} err] != 0 } {
				puts $err
			}

			# Chosen btree tests get a second pass with log
			# file zeroing.
			if { $am == "btree" && \
			    [lsearch -exact $zero_log_tests $tname] != -1 } {
				if { [catch {eval run_recd $am $tname \
				    $run $display -zero_log $args} err] != 0 } {
					puts $err
				}
			}

			if { $gen_upgrade_log == 1 } {
				save_upgrade_files $testdir
			}
		}
	}

	# We can skip logtrack_summary during the crypto upgrade run -
	# it doesn't introduce any new log types.
	if { $run } {
		if { $gen_upgrade_log == 0 || $encrypt == 0 } {
			logtrack_summary
		}
	}
	set log_log_record_types 0
}
1579
proc run_all { { testname ALL } args } {
	# Top-level driver: run the standard suite plus access method tests
	# at extra page sizes, in per-test txn envs, in threaded and
	# -alloc envs, under replication, with security, etc.  Results are
	# accumulated in ALL.OUT and scanned for FAIL lines at the end.
	# $testname may name a single test ("ALL" runs everything).
	global test_names
	global one_test
	global has_crypto
	global valid_methods
	source ./include.tcl

	fileremove -f ALL.OUT

	set one_test $testname
	if { $one_test != "ALL" } {
		# Source testparams again to adjust test_names.
		source $test_path/testparams.tcl
	}

	# Extract flags:  "m" = access-method tests only,
	# "n" = display commands without running them.
	set exflgs [eval extractflags $args]
	set flags [lindex $exflgs 1]
	set display 1
	set run 1
	set am_only 0
	set parallel 0
	set nparalleltests 0
	set rflags {--}
	foreach f $flags {
		switch $f {
			m {
				set am_only 1
			}
			n {
				set display 1
				set run 0
				set rflags [linsert $rflags 0 "-n"]
			}
		}
	}

	# Record the start time both on the console and in ALL.OUT.
	set o [open ALL.OUT a]
	if { $run == 1 } {
		puts -nonewline "Test suite run started at: "
		puts [clock format [clock seconds] -format "%H:%M %D"]
		puts [berkdb version -string]

		puts -nonewline $o "Test suite run started at: "
		puts $o [clock format [clock seconds] -format "%H:%M %D"]
		puts $o [berkdb version -string]
	}
	close $o
	#
	# First run standard tests.  Send in a -A to let run_std know
	# that it is part of the "run_all" run, so that it doesn't
	# print out start/end times.
	#
	lappend args -A
	eval {run_std} $one_test $args

	set test_pagesizes [get_test_pagesizes]
	set args [lindex $exflgs 0]
	set save_args $args

	# Re-run the access method tests at each non-default page size,
	# with checksums enabled.
	foreach pgsz $test_pagesizes {
		set args $save_args
		append args " -pagesize $pgsz -chksum"
		if { $am_only == 0 } {
			# Run recovery tests.
			#
			# XXX These don't actually work at multiple pagesizes;
			# disable them for now.
			#
			# XXX These too are broken into separate tclsh
			# instantiations so we don't require so much
			# memory, but I think it's cleaner
			# and more useful to do it down inside proc r than here,
			# since "r recd" gets done a lot and needs to work.
			#
			# XXX See comment in run_std for why this only directs
			# stdout and not stderr.  Don't worry--the right stuff
			# happens.
			#puts "Running recovery tests with pagesize $pgsz"
			#if [catch {exec $tclsh_path \
			#    << "source $test_path/test.tcl; \
			#    r $rflags recd $args" \
			#    2>@ stderr >> ALL.OUT } res] {
			#	set o [open ALL.OUT a]
			#	puts $o "FAIL: recd test:"
			#	puts $o $res
			#	close $o
			#}
		}

		# Access method tests.
		# Run subdb tests with varying pagesizes too.
		# XXX
		# Broken up into separate tclsh instantiations so
		# we don't require so much memory.
		foreach method $valid_methods {
			puts "Running $method tests with pagesize $pgsz"
			foreach sub {test sdb si} {
				foreach test $test_names($sub) {
					if { $run == 0 } {
						set o [open ALL.OUT a]
						eval {run_method -$method \
						    $test $display $run $o} \
						    $args
						close $o
					}
					if { $run } {
						if [catch {exec $tclsh_path << \
						    "global one_test; \
						    set one_test $one_test; \
						    source $test_path/test.tcl; \
						    eval {run_method -$method \
						    $test $display $run \
						    stdout} $args" \
						    >>& ALL.OUT } res] {
							set o [open ALL.OUT a]
							puts $o "FAIL: \
							    -$method $test: $res"
							close $o
						}
					}
				}
			}
		}
	}
	set args $save_args
	#
	# Run access method tests at default page size in one env.
	#
	foreach method $valid_methods {
		puts "Running $method tests in a txn env"
		foreach sub {test sdb si} {
			foreach test $test_names($sub) {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					run_envmethod -$method $test $display \
					    $run $o $args
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    run_envmethod -$method $test \
				  	    $display $run stdout $args" \
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL: run_envmethod \
						    $method $test: $res"
						close $o
					}
				}
			}
		}
	}
	#
	# Run access method tests at default page size in thread-enabled env.
	# We're not truly running threaded tests, just testing the interface.
	#
	foreach method $valid_methods {
		puts "Running $method tests in a threaded txn env"
		foreach sub {test sdb si} {
			foreach test $test_names($sub) {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					eval {run_envmethod -$method $test \
					    $display $run $o -thread}
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    eval {run_envmethod -$method $test \
				  	    $display $run stdout -thread}" \
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL: run_envmethod \
						    $method $test -thread: $res"
						close $o
					}
				}
			}
		}
	}
	#
	# Run access method tests at default page size with -alloc enabled.
	#
	foreach method $valid_methods {
		puts "Running $method tests in an env with -alloc"
		foreach sub {test sdb si} {
			foreach test $test_names($sub) {
				if { $run == 0 } {
					set o [open ALL.OUT a]
					eval {run_envmethod -$method $test \
					    $display $run $o -alloc}
					close $o
				}
				if { $run } {
					if [catch {exec $tclsh_path << \
					    "global one_test; \
					    set one_test $one_test; \
					    source $test_path/test.tcl; \
					    eval {run_envmethod -$method $test \
				  	    $display $run stdout -alloc}" \
					    >>& ALL.OUT } res] {
						set o [open ALL.OUT a]
						puts $o "FAIL: run_envmethod \
						    $method $test -alloc: $res"
						close $o
					}
				}
			}
		}
	}

	# Run standard access method tests under replication.
	#
	set test_list [list {"testNNN under replication"	"repmethod"}]

	# If we're on Windows, Linux, FreeBSD, or Solaris, run the
	# bigfile tests.  These create files larger than 4 GB.
	if { $is_freebsd_test == 1 || $is_linux_test == 1 || \
	    $is_sunos_test == 1 || $is_windows_test == 1 } {
		lappend test_list {"big files"	"bigfile"}
	}

	# If release supports encryption, run security tests.
	#
	if { $has_crypto == 1 } {
		lappend test_list {"testNNN with security"	"secmethod"}
	}
	#
	# If configured for RPC, then run rpc tests too.
	#
	if { [file exists ./berkeley_db_svc] ||
	     [file exists ./berkeley_db_cxxsvc] ||
	     [file exists ./berkeley_db_javasvc] } {
		lappend test_list {"RPC"	"rpc"}
	}

	# Each pair is {description, "r" command}; run each group in a
	# child tclsh, appending output to ALL.OUT.
	foreach pair $test_list {
		set msg [lindex $pair 0]
		set cmd [lindex $pair 1]
		puts "Running $msg tests"
		if [catch {exec $tclsh_path << \
		    "global one_test; set one_test $one_test; \
		    source $test_path/test.tcl; \
		    r $rflags $cmd $args" >>& ALL.OUT } res] {
			set o [open ALL.OUT a]
			puts $o "FAIL: $cmd test: $res"
			close $o
		}
	}

	# If not actually running, no need to check for failure.
	if { $run == 0 } {
		return
	}

	# Scan ALL.OUT for any line starting with FAIL and report the
	# overall verdict both to the console and to ALL.OUT.
	set failed 0
	set o [open ALL.OUT r]
	while { [gets $o line] >= 0 } {
		if { [regexp {^FAIL} $line] != 0 } {
			set failed 1
		}
	}
	close $o
	set o [open ALL.OUT a]
	if { $failed == 0 } {
		puts "Regression Tests Succeeded"
		puts $o "Regression Tests Succeeded"
	} else {
		puts "Regression Tests Failed; see ALL.OUT for log"
		puts $o "Regression Tests Failed"
	}

	puts -nonewline "Test suite run completed at: "
	puts [clock format [clock seconds] -format "%H:%M %D"]
	puts -nonewline $o "Test suite run completed at: "
	puts $o [clock format [clock seconds] -format "%H:%M %D"]
	close $o
}
1864
1865#
1866# Run method tests in one environment.  (As opposed to run_envmethod
1867# which runs each test in its own, new environment.)
1868#
proc run_envmethod1 { method {display 0} {run 1} { outfile stdout } args } {
	# Run all of test_names(test) for $method in ONE shared environment.
	# (As opposed to run_envmethod, which runs each test in its own,
	# new environment.)
	global __debug_on
	global __debug_print
	global __debug_test
	global is_envmethod
	global test_names
	global parms
	source ./include.tcl

	if { $run == 1 } {
		puts "run_envmethod1: $method $args"
	}

	set is_envmethod 1
	if { $run == 1 } {
		check_handles
		env_cleanup $testdir
		error_check_good envremove [berkdb envremove -home $testdir] 0
		set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
		    {-mode 0644 -home $testdir}]
		error_check_good env_open [is_valid_env $env] TRUE
		# Note: largs only gets created when run == 1; the loops
		# below reference it only under the same condition.
		append largs " -env $env "
	}

	if { $display } {
		# The envmethod1 tests can't be split up, since they share
		# an env.
		puts $outfile "eval run_envmethod1 $method $args"
	}

	# NOTE(review): the two catch blocks below are identical except
	# for the error prefix ("run_envmethod" vs "run_envmethod1"), so
	# every test executes twice in the shared env.  Confirm the
	# double run (and the first prefix) is intentional and not a
	# copy-and-paste slip.
	set stat [catch {
		foreach test $test_names(test) {
			if { [info exists parms($test)] != 1 } {
				puts stderr "$test disabled in\
				    testparams.tcl; skipping."
				continue
			}
			if { $run } {
				puts $outfile "[timestamp]"
				eval $test $method $parms($test) $largs
				if { $__debug_print != 0 } {
					puts $outfile ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
			}
			flush stdout
			flush stderr
		}
	} res]
	if { $stat != 0} {
		global errorInfo;

		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_envmethod: $method $test: $theError"
		} else {
			error $theError;
		}
	}
	set stat [catch {
		foreach test $test_names(test) {
			if { [info exists parms($test)] != 1 } {
				puts stderr "$test disabled in\
				    testparams.tcl; skipping."
				continue
			}
			if { $run } {
				puts $outfile "[timestamp]"
				eval $test $method $parms($test) $largs
				if { $__debug_print != 0 } {
					puts $outfile ""
				}
				if { $__debug_on != 0 } {
					debug $__debug_test
				}
			}
			flush stdout
			flush stderr
		}
	} res]
	if { $stat != 0} {
		global errorInfo;

		set fnl [string first "\n" $errorInfo]
		set theError [string range $errorInfo 0 [expr $fnl - 1]]
		if {[string first FAIL $errorInfo] == -1} {
			error "FAIL:[timestamp]\
			    run_envmethod1: $method $test: $theError"
		} else {
			error $theError;
		}
	}
	if { $run == 1 } {
		error_check_good envclose [$env close] 0
		check_handles $outfile
	}
	set is_envmethod 0

}
1972
1973# Run the secondary index tests.
proc sindex { {display 0} {run 1} {outfile stdout} {verbose 0} args } {
	# Run the secondary index tests (test_names(si)) over combinations
	# of primary and secondary access methods and key counts.
	global test_names
	global testdir
	global verbose_check_secondaries
	set verbose_check_secondaries $verbose
	# Standard number of secondary indices to create if a single-element
	# list of methods is passed into the secondary index tests.
	global nsecondaries
	set nsecondaries 2

	# Run basic tests with a single secondary index and a small number
	# of keys, then again with a larger number of keys.  (Note that
	# we can't go above 5000, since we use two items from our
	# 10K-word list for each key/data pair.)
	foreach n { 200 5000 } {
		foreach pm { btree hash recno frecno queue queueext } {
			foreach sm { dbtree dhash ddbtree ddhash btree hash } {
				foreach test $test_names(si) {
					if { $display } {
						puts -nonewline $outfile \
						    "eval $test {\[list\
						    $pm $sm $sm\]} $n ;"
						puts $outfile " verify_dir \
						    $testdir \"\" 1"
					}
					if { $run } {
			 			check_handles $outfile
						eval $test \
						    {[list $pm $sm $sm]} $n
						verify_dir $testdir "" 1
					}
				}
			}
		}
	}

	# Run tests with 20 secondaries.
	foreach pm { btree hash } {
		set methlist [list $pm]
		for { set j 1 } { $j <= 20 } {incr j} {
			# XXX this should incorporate hash after #3726
			if { $j % 2 == 0 } {
				lappend methlist "dbtree"
			} else {
				lappend methlist "ddbtree"
			}
		}
		foreach test $test_names(si) {
			if { $display } {
				puts "eval $test {\[list $methlist\]} 500"
			}
			if { $run } {
				eval $test {$methlist} 500
			}
		}
	}
}
2031
2032# Run secondary index join test.  (There's no point in running
2033# this with both lengths, the primary is unhappy for now with fixed-
2034# length records (XXX), and we need unsorted dups in the secondaries.)
proc sijoin { {display 0} {run 1} {outfile stdout} } {
	# Run the secondary index join test over each primary access
	# method, trying homogeneous secondary pairs (btree/btree,
	# hash/hash) plus the two mixed combinations.
	foreach primary { btree hash recno } {
		# Build the list of secondary-method pairs for this primary.
		set combos {}
		foreach secmeth { btree hash } {
			lappend combos [list $secmeth $secmeth]
		}
		lappend combos {btree hash} {hash btree}

		if { $display } {
			foreach pair $combos {
				set s1 [lindex $pair 0]
				set s2 [lindex $pair 1]
				puts $outfile "eval sijointest {\[list $primary $s1 $s2\]} 1000"
			}
		}
		if { $run } {
			foreach pair $combos {
				set s1 [lindex $pair 0]
				set s2 [lindex $pair 1]
				eval sijointest {[list $primary $s1 $s2]} 1000
			}
		}
	}
}
2056
proc run { proc_suffix method {start 1} {stop 999} } {
	# Dispatch to run_<proc_suffix> (e.g. run_method, run_recd) for a
	# numbered range of tests with the given access method.
	#
	# proc_suffix	one of the suffixes listed in the switch below.
	# method	access method passed through to the run_* proc.
	# start/stop	inclusive test-number range; sparse numbering is
	#		allowed and missing numbers are silently skipped.
	global test_names

	switch -exact -- $proc_suffix {
		envmethod -
		method -
		recd -
		repmethod -
		reptest -
		secenv -
		secmethod {
			# Run_recd runs the recd tests, all others
			# run the "testxxx" tests.
			if { $proc_suffix == "recd" } {
				set testtype recd
			} else {
				set testtype test
			}

			for { set i $start } { $i <= $stop } { incr i } {
				set name [format "%s%03d" $testtype $i]
				# If a test number is missing, silently skip
				# to next test; sparse numbering is allowed.
				if { [lsearch -exact $test_names($testtype) \
				    $name] == -1 } {
					continue
				}
				run_$proc_suffix $method $name
			}
		}
		default {
			# Bug fix: message formerly read "set up with to be
			# used with run".
			puts "$proc_suffix is not set up to be used with run"
		}
	}
}
2092
2093
2094# We want to test all of 512b, 8Kb, and 64Kb pages, but chances are one
2095# of these is the default pagesize.  We don't want to run all the AM tests
2096# twice, so figure out what the default page size is, then return the
2097# other two.
proc get_test_pagesizes { } {
	# We want to test all of 512b, 8Kb and 64Kb pages, but one of them
	# is likely the platform default.  Discover the default page size
	# by creating a throwaway in-memory btree database and reading its
	# stats, then return the sizes that differ from it.
	set db [berkdb_open -create -btree]
	error_check_good gtp_create [is_valid_db $db] TRUE

	set default_pgsz 0
	foreach stat_pair [$db stat] {
		if { [string compare [lindex $stat_pair 0] {Page size}] == 0 } {
			set default_pgsz [lindex $stat_pair 1]
		}
	}

	error_check_good gtp_close [$db close] 0

	error_check_bad gtp_pgsz $default_pgsz 0
	switch $default_pgsz {
		512 { return {8192 65536} }
		8192 { return {512 65536} }
		65536 { return {512 8192} }
		default { return {512 8192 65536} }
	}
	error_check_good NOTREACHED 0 1
}
2122
proc run_timed_once { timedtest args } {
	# Execute $timedtest once with $args and return the elapsed
	# wall-clock time in seconds.  Errors are re-raised with a FAIL
	# prefix unless the underlying error already carries one.
	set begin [timestamp -r]
	set failed [catch {
		eval $timedtest $args
		flush stdout
		flush stderr
	} res]
	set finish [timestamp -r]
	if { $failed != 0 } {
		global errorInfo

		# Keep only the first line of the stack trace.
		set cut [string first "\n" $errorInfo]
		set firstline [string range $errorInfo 0 [expr {$cut - 1}]]
		if { [string first FAIL $errorInfo] == -1 } {
			error "FAIL:[timestamp]\
			    run_timed: $timedtest: $firstline"
		} else {
			error $firstline
		}
	}
	return [expr {$finish - $begin}]
}
2145
proc run_timed { niter timedtest args } {
	# Run $timedtest $niter times via run_timed_once, printing each
	# iteration's elapsed time and, when niter > 1, the (integer)
	# average and the raw per-iteration timings.
	#
	# niter		number of iterations; must be >= 1 or an error is
	#		raised.
	# timedtest	test proc to run.
	# args		extra arguments forwarded to the test.
	if { $niter < 1 } {
		error "run_timed: Invalid number of iterations $niter"
	}
	set sum 0
	set e {}
	for { set i 1 } { $i <= $niter } { incr i } {
		set elapsed [eval run_timed_once $timedtest $args]
		lappend e $elapsed
		# Braced expr avoids double substitution (Tcl idiom).
		set sum [expr {$sum + $elapsed}]
		puts "Test $timedtest run $i completed: $elapsed seconds"
	}
	if { $niter > 1 } {
		# Integer division, matching the integer second timestamps.
		set avg [expr {$sum / $niter}]
		puts "Average $timedtest time: $avg"
		puts "Raw $timedtest data: $e"
	}
}
2164