1# See the file LICENSE for redistribution information.
2#
3# Copyright (c) 1996-2009 Oracle.  All rights reserved.
4#
5# $Id$
6
7source ./include.tcl
8
9# Add the default Windows build sub-directory to the path, so that
10# the binaries can be found without copies.
11if {[string match Win* $tcl_platform(os)]} {
12	global env
13	global buildpath
14	set env(PATH) "$env(PATH)\;$buildpath"
15}
16
17# Load DB's TCL API.
18load $tcllib
19
20if { [file exists $testdir] != 1 } {
21	file mkdir $testdir
22}
23
24global __debug_print
25global __debug_on
26global __debug_test
27
28#
29# Test if utilities work to figure out the path.  Most systems
30# use ., but QNX has a problem with execvp of shell scripts which
31# causes it to break.
32#
33set stat [catch {exec ./db_printlog -?} ret]
34if { [string first "exec format error" $ret] != -1 } {
35	set util_path ./.libs
36} else {
37	set util_path .
38}
39set __debug_print 0
40set encrypt 0
41set old_encrypt 0
42set passwd test_passwd
43
44# Error stream that (should!) always go to the console, even if we're
45# redirecting to ALL.OUT.
46set consoleerr stderr
47
48set dict $test_path/wordlist
49set alphabet "abcdefghijklmnopqrstuvwxyz"
50set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
51
52# Random number seed.
53global rand_init
54set rand_init 11302005
55
56# Default record length for fixed record length access method(s)
57set fixed_len 20
58
59set recd_debug	0
60set log_log_record_types 0
61set ohandles {}
62
63# Normally, we're not running an all-tests-in-one-env run.  This matters
64# for error stream/error prefix settings in berkdb_open.
65global is_envmethod
66set is_envmethod 0
67
68#
69# Set when we're running a child process in a rep test.
70#
71global is_repchild
72set is_repchild 0
73
74# Set when we want to use replication test messaging that cannot
75# share an env -- for example, because the replication processes
76# are not all from the same BDB version.
77global noenv_messaging
78set noenv_messaging 0
79
80# For testing locker id wrap around.
81global lock_curid
82global lock_maxid
83set lock_curid 0
84set lock_maxid 2147483647
85global txn_curid
86global txn_maxid
87set txn_curid 2147483648
88set txn_maxid 4294967295
89
90# The variable one_test allows us to run all the permutations
91# of a test with run_all or run_std.
92global one_test
93if { [info exists one_test] != 1 } {
94	set one_test "ALL"
95}
96
97# If you call a test with the proc find_valid_methods, it will
98# return the list of methods for which it will run, instead of
99# actually running.
100global checking_valid_methods
101set checking_valid_methods 0
102global valid_methods
103set valid_methods { btree rbtree queue queueext recno frecno rrecno hash }
104
105# The variable test_recopts controls whether we open envs in
106# replication tests with the -recover flag.   The default is
107# to test with and without the flag, but to run a meaningful
108# subset of rep tests more quickly, rep_subset will randomly
109# pick one or the other.
110global test_recopts
111set test_recopts { "-recover" "" }
112
113# Set up any OS-specific values.
114source $test_path/testutils.tcl
115
116global tcl_platform
117set is_freebsd_test [string match FreeBSD $tcl_platform(os)]
118set is_hp_test [string match HP-UX $tcl_platform(os)]
119set is_linux_test [string match Linux $tcl_platform(os)]
120set is_qnx_test [string match QNX $tcl_platform(os)]
121set is_sunos_test [string match SunOS $tcl_platform(os)]
122set is_windows_test [string match Win* $tcl_platform(os)]
123set is_windows9x_test [string match "Windows 95" $tcl_platform(osVersion)]
124set is_je_test 0
125set upgrade_be [big_endian]
126global is_fat32
127set is_fat32 [string match FAT32 [lindex [file system check] 1]]
128global EXE BAT
129if { $is_windows_test == 1 } {
130	set EXE ".exe"
131	set BAT ".bat"
132} else {
133	set EXE ""
134	set BAT ""
135}
136
137if { $is_windows_test == 1 } {
138	set util_path "./$buildpath"
139}
140
141# This is where the test numbering and parameters now live.
142source $test_path/testparams.tcl
143source $test_path/db_reptest.tcl
144
145# Try to open an encrypted database.  If it fails, this release
146# doesn't support encryption, and encryption tests should be skipped.
147set has_crypto 1
148set stat [catch {set db [eval {berkdb_open_noerr \
149    -create -btree -encryptaes test_passwd} ] } result ]
150if { $stat != 0 } {
151	# Make sure it's the right error for a non-crypto release.
152	error_check_good non_crypto_release \
153	    [expr [is_substr $result "operation not supported"] || \
154	    [is_substr $result "invalid argument"]] 1
155	set has_crypto 0
156} else {
157	# It is a crypto release.  Get rid of the db, we don't need it.
158	error_check_good close_encrypted_db [$db close] 0
159}
160
161# Get the default page size of this system
162global default_pagesize
163set db [berkdb_open_noerr -create -btree]
164error_check_good "db open" [is_valid_db $db] TRUE
165set stat [catch {set default_pagesize [$db get_pagesize]} result]
166error_check_good "db get_pagesize" $stat 0
167error_check_good "db close" [$db close] 0
168
169# From here on out, test.tcl contains the procs that are used to
170# run all or part of the test suite.
171
172proc run_std { { testname ALL } args } {
173	global test_names
174	global one_test
175	global has_crypto
176	global valid_methods
177	source ./include.tcl
178
179	set one_test $testname
180	if { $one_test != "ALL" } {
181		# Source testparams again to adjust test_names.
182		source $test_path/testparams.tcl
183	}
184
185	set exflgs [eval extractflags $args]
186	set args [lindex $exflgs 0]
187	set flags [lindex $exflgs 1]
188
189	set display 1
190	set run 1
191	set am_only 0
192	set no_am 0
193	set std_only 1
194	set rflags {--}
195	foreach f $flags {
196		switch $f {
197			A {
198				set std_only 0
199			}
200			M {
201				set no_am 1
202				puts "run_std: all but access method tests."
203			}
204			m {
205				set am_only 1
206				puts "run_std: access method tests only."
207			}
208			n {
209				set display 1
210				set run 0
211				set rflags [linsert $rflags 0 "-n"]
212			}
213		}
214	}
215
216	if { $std_only == 1 } {
217		fileremove -f ALL.OUT
218
219		set o [open ALL.OUT a]
220		if { $run == 1 } {
221			puts -nonewline "Test suite run started at: "
222			puts [clock format [clock seconds] -format "%H:%M %D"]
223			puts [berkdb version -string]
224
225			puts -nonewline $o "Test suite run started at: "
226			puts $o [clock format [clock seconds] -format "%H:%M %D"]
227			puts $o [berkdb version -string]
228		}
229		close $o
230	}
231
232	set test_list {
233	{"environment"		"env"}
234	{"archive"		"archive"}
235	{"backup"		"backup"}
236	{"file operations"	"fop"}
237	{"locking"		"lock"}
238	{"logging"		"log"}
239	{"memory pool"		"memp"}
240	{"transaction"		"txn"}
241	{"deadlock detection"	"dead"}
242	{"subdatabase"		"sdb"}
243	{"byte-order"		"byte"}
244	{"recno backing file"	"rsrc"}
245	{"DBM interface"	"dbm"}
246	{"NDBM interface"	"ndbm"}
247	{"Hsearch interface"	"hsearch"}
248	{"secondary index"	"sindex"}
249	{"partition"		"partition"}
250	{"compression"		"compressed"}
251	{"replication manager" 	"repmgr"}
252	}
253
254	# If this is run_std only, run each rep test for a single
255	# access method.  If run_all, run for all access methods.
256	if { $std_only == 1 } {
257		lappend test_list {"replication"	"rep_subset"}
258	} else {
259		lappend test_list {"replication"	"rep_complete"}
260	}
261
262	# If release supports encryption, run security tests.
263	if { $has_crypto == 1 } {
264	        lappend test_list {"security"   "sec"}
265	}
266
267	if { $am_only == 0 } {
268		foreach pair $test_list {
269			set msg [lindex $pair 0]
270			set cmd [lindex $pair 1]
271			puts "Running $msg tests"
272			if [catch {exec $tclsh_path << \
273			    "global one_test; set one_test $one_test; \
274			    source $test_path/test.tcl; r $rflags $cmd" \
275			    >>& ALL.OUT } res] {
276				set o [open ALL.OUT a]
277				puts $o "FAIL: $cmd test: $res"
278				close $o
279			}
280		}
281
282		# Run recovery tests.
283		#
284		# XXX These too are broken into separate tclsh instantiations
285		# so we don't require so much memory, but I think it's cleaner
286		# and more useful to do it down inside proc r than here,
287		# since "r recd" gets done a lot and needs to work.
288		#
289		# Note that we still wrap the test in an exec so that
290		# its output goes to ALL.OUT.  run_recd will wrap each test
291		# so that both error streams go to stdout (which here goes
292		# to ALL.OUT);  information that run_recd wishes to print
293		# to the "real" stderr, but outside the wrapping for each test,
294		# such as which tests are being skipped, it can still send to
295		# stderr.
296		puts "Running recovery tests"
297		if [catch {
298		    exec $tclsh_path << \
299		    "global one_test; set one_test $one_test; \
300		    source $test_path/test.tcl; r $rflags recd" \
301			2>@ stderr >> ALL.OUT
302		    } res] {
303			set o [open ALL.OUT a]
304			puts $o "FAIL: recd tests: $res"
305			close $o
306		}
307
308		# Run join test
309		#
310		# XXX
311		# Broken up into separate tclsh instantiations so we don't
312		# require so much memory.
313		if { $one_test == "ALL" } {
314			puts "Running join test"
315			foreach test "join1 join2 join3 join4 join5 join6" {
316				if [catch {exec $tclsh_path << \
317				    "source $test_path/test.tcl; r $rflags $test" \
318				    >>& ALL.OUT } res] {
319					set o [open ALL.OUT a]
320					puts $o "FAIL: $test test: $res"
321					close $o
322				}
323			}
324		}
325	}
326
327	if { $no_am == 0 } {
328		# Access method tests.
329		#
330		# XXX
331		# Broken up into separate tclsh instantiations so we don't
332		# require so much memory.
333		foreach method $valid_methods {
334			puts "Running $method tests"
335			foreach test $test_names(test) {
336				if { $run == 0 } {
337					set o [open ALL.OUT a]
338					run_method \
339					    -$method $test $display $run $o
340					close $o
341				}
342				if { $run } {
343					if [catch {exec $tclsh_path << \
344					    "global one_test; \
345					    set one_test $one_test; \
346					    source $test_path/test.tcl; \
347					    run_method \
348					    -$method $test $display $run"\
349					    >>& ALL.OUT } res] {
350						set o [open ALL.OUT a]
351						puts $o "FAIL:$test $method: $res"
352						close $o
353					}
354				}
355			}
356		}
357	}
358
359	# If not actually running, no need to check for failure.
360	# If running in the context of the larger 'run_all' we don't
361	# check for failure here either.
362	if { $run == 0 || $std_only == 0 } {
363		return
364	}
365
366	set failed [check_output ALL.OUT]
367
368	set o [open ALL.OUT a]
369	if { $failed == 0 } {
370		puts "Regression Tests Succeeded"
371		puts $o "Regression Tests Succeeded"
372	} else {
373		puts "Regression Tests Failed"
374		puts "Check UNEXPECTED OUTPUT lines."
375		puts "Review ALL.OUT.x for details."
376		puts $o "Regression Tests Failed"
377	}
378
379	puts -nonewline "Test suite run completed at: "
380	puts [clock format [clock seconds] -format "%H:%M %D"]
381	puts -nonewline $o "Test suite run completed at: "
382	puts $o [clock format [clock seconds] -format "%H:%M %D"]
383	close $o
384}
385
386proc check_output { file } {
387	# These are all the acceptable patterns.
388	set pattern {(?x)
389		^[:space:]*$|
390		.*?wrap\.tcl.*|
391		.*?dbscript\.tcl.*|
392		.*?ddscript\.tcl.*|
393		.*?mpoolscript\.tcl.*|
394		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)$|
395		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\sCrashing$|
396		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s[p|P]rocesses\srunning:.*|
397		^\d\d:\d\d:\d\d\s\(\d\d:\d\d:\d\d\)\s5\sprocesses\srunning.*|
398		^\d:\sPut\s\d*\sstrings\srandom\soffsets.*|
399		^100.*|
400		^eval\s.*|
401		^exec\s.*|
402		^fileops:\s.*|
403		^jointest.*$|
404		^r\sarchive\s*|
405		^r\sbackup\s*|
406		^r\sdbm\s*|
407		^r\shsearch\s*|
408		^r\sndbm\s*|
409		^r\srpc\s*|
410		^run_recd:\s.*|
411		^run_reptest\s.*|
412		^run_rpcmethod:\s.*|
413		^run_secenv:\s.*|
414		^All\sprocesses\shave\sexited.$|
415		^Backuptest\s.*|
416		^Beginning\scycle\s\d$|
417		^Byteorder:.*|
418		^Child\sruns\scomplete\.\s\sParent\smodifies\sdata\.$|
419		^Deadlock\sdetector:\s\d*\sCheckpoint\sdaemon\s\d*$|
420		^Ending\srecord.*|
421		^Environment\s.*?specified;\s\sskipping\.$|
422		^Executing\srecord\s.*|
423		^Freeing\smutex\s.*|
424		^Join\stest:\.*|
425		^Method:\s.*|
426                ^Putting\s.*databases.*|
427		^Repl:\stest\d\d\d:.*|
428		^Repl:\ssdb\d\d\d:.*|
429		^Running\stest\ssdb.*|
430		^Running\stest\stest.*|
431                ^run_inmem_db\s.*rep.*|
432                ^run_inmem_log\s.*rep.*|
433                ^run_mixedmode_log\s.*rep.*|
434		^Script\swatcher\sprocess\s.*|
435		^Secondary\sindex\sjoin\s.*|
436		^Berkeley\sDB\s.*|
437		^Test\ssuite\srun\s.*|
438                ^Test\s.*rep.*|
439		^Unlinking\slog:\serror\smessage\sOK$|
440		^Verifying\s.*|
441		^\t*\.\.\.dbc->get.*$|
442		^\t*\.\.\.dbc->put.*$|
443		^\t*\.\.\.key\s\d.*$|
444		^\t*\.\.\.Skipping\sdbc.*|
445		^\t*and\s\d*\sduplicate\sduplicates\.$|
446		^\t*About\sto\srun\srecovery\s.*complete$|
447		^\t*Add\sa\sthird\sversion\s.*|
448		^\t*Archive[:\.].*|
449		^\t*Backuptest.*|
450		^\t*Bigfile[0-9][0-9][0-9].*|
451		^\t*Building\s.*|
452		^\t*closing\ssecondaries\.$|
453		^\t*Command\sexecuted\sand\s.*$|
454		^\t*DBM.*|
455		^\t*[d|D]ead[0-9][0-9][0-9].*|
456		^\t*Dump\/load\sof.*|
457		^\t*[e|E]nv[0-9][0-9][0-9].*|
458		^\t*Executing\scommand$|
459		^\t*Executing\stxn_.*|
460		^\t*File\srecd005\.\d\.db\sexecuted\sand\saborted\.$|
461		^\t*File\srecd005\.\d\.db\sexecuted\sand\scommitted\.$|
462		^\t*[f|F]op[0-9][0-9][0-9].*|
463		^\t*HSEARCH.*|
464		^\t*Initial\sCheckpoint$|
465		^\t*Iteration\s\d*:\sCheckpointing\.$|
466		^\t*Joining:\s.*|
467		^\t*Kid[1|2]\sabort\.\.\.complete$|
468		^\t*Kid[1|2]\scommit\.\.\.complete$|
469		^\t*[l|L]ock[0-9][0-9][0-9].*|
470		^\t*[l|L]og[0-9][0-9][0-9].*|
471		^\t*[m|M]emp[0-9][0-9][0-9].*|
472		^\t*[m|M]utex[0-9][0-9][0-9].*|
473		^\t*NDBM.*|
474		^\t*opening\ssecondaries\.$|
475		^\t*op_recover_rec:\sRunning\srecovery.*|
476		^\t*[r|R]ecd[0-9][0-9][0-9].*|
477		^\t*[r|R]ep[0-9][0-9][0-9].*|
478		^\t*[r|R]epmgr[0-9][0-9][0-9].*|
479		^\t*[r|R]ep_test.*|
480		^\t*[r|R]pc[0-9][0-9][0-9].*|
481		^\t*[r|R]src[0-9][0-9][0-9].*|
482		^\t*Recover\sfrom\sfirst\sdatabase$|
483		^\t*Recover\sfrom\ssecond\sdatabase$|
484		^\t*Remove\ssecond\sdb$|
485		^\t*Rep_verify.*|
486		^\t*Run_rpcmethod.*|
487		^\t*Running\srecovery\son\s.*|
488		^\t*[s|S]ec[0-9][0-9][0-9].*|
489		^\t*[s|S]i[0-9][0-9][0-9].*|
490		^\t*[s|S]ijoin.*|
491		^\t*Salvage\stests\sof.*|
492		^\t*sdb[0-9][0-9][0-9].*|
493		^\t*Skipping\s.*|
494		^\t*Subdb[0-9][0-9][0-9].*|
495		^\t*Subdbtest[0-9][0-9][0-9].*|
496		^\t*Syncing$|
497		^\t*[t|T]est[0-9][0-9][0-9].*|
498		^\t*[t|T]xn[0-9][0-9][0-9].*|
499		^\t*Txnscript.*|
500		^\t*Using\s.*?\senvironment\.$|
501		^\t*Verification\sof.*|
502		^\t*with\stransactions$}
503
504	set failed 0
505	set f [open $file r]
506	while { [gets $f line] >= 0 } {
507		if { [regexp $pattern $line] == 0 } {
508			puts -nonewline "UNEXPECTED OUTPUT: "
509			puts $line
510			set failed 1
511		}
512	}
513	close $f
514	return $failed
515}
516
517proc r { args } {
518	global test_names
519	global has_crypto
520	global rand_init
521	global one_test
522	global test_recopts
523	global checking_valid_methods
524
525	source ./include.tcl
526
527	set exflgs [eval extractflags $args]
528	set args [lindex $exflgs 0]
529	set flags [lindex $exflgs 1]
530
531	set display 1
532	set run 1
533	set saveflags "--"
534	foreach f $flags {
535		switch $f {
536			n {
537				set display 1
538				set run 0
539				set saveflags "-n $saveflags"
540			}
541		}
542	}
543
544	if {[catch {
545		set sub [ lindex $args 0 ]
546		set starttest [lindex $args 1]
547		switch $sub {
548			bigfile -
549			dead -
550			env -
551			lock -
552			log -
553			memp -
554			multi_repmgr -
555			mutex -
556			rsrc -
557			sdbtest -
558			txn {
559				if { $display } {
560					run_subsystem $sub 1 0
561				}
562				if { $run } {
563					run_subsystem $sub
564				}
565			}
566			byte {
567				if { $one_test == "ALL" } {
568					run_test byteorder $display $run
569				}
570			}
571			archive -
572			backup -
573			dbm -
574			hsearch -
575			ndbm -
576			shelltest {
577				if { $one_test == "ALL" } {
578					if { $display } { puts "eval $sub" }
579					if { $run } {
580						check_handles
581						eval $sub
582					}
583				}
584			}
585			compact -
586			elect -
587			inmemdb -
588			init -
589			fop {
590				foreach test $test_names($sub) {
591					eval run_test $test $display $run
592				}
593			}
594			compressed {
595				set tindex [lsearch $test_names(test) $starttest]
596				if { $tindex == -1 } {
597					set tindex 0
598				}
599				set clist [lrange $test_names(test) $tindex end]
600				foreach test $clist {
601					eval run_compressed btree $test $display $run
602				}
603			}
604			join {
605				eval r $saveflags join1
606				eval r $saveflags join2
607				eval r $saveflags join3
608				eval r $saveflags join4
609				eval r $saveflags join5
610				eval r $saveflags join6
611			}
612			join1 {
613				if { $display } { puts "eval jointest" }
614				if { $run } {
615					check_handles
616					eval jointest
617				}
618			}
619			joinbench {
620				puts "[timestamp]"
621				eval r $saveflags join1
622				eval r $saveflags join2
623				puts "[timestamp]"
624			}
625			join2 {
626				if { $display } { puts "eval jointest 512" }
627				if { $run } {
628					check_handles
629					eval jointest 512
630				}
631			}
632			join3 {
633				if { $display } {
634					puts "eval jointest 8192 0 -join_item"
635				}
636				if { $run } {
637					check_handles
638					eval jointest 8192 0 -join_item
639				}
640			}
641			join4 {
642				if { $display } { puts "eval jointest 8192 2" }
643				if { $run } {
644					check_handles
645					eval jointest 8192 2
646				}
647			}
648			join5 {
649				if { $display } { puts "eval jointest 8192 3" }
650				if { $run } {
651					check_handles
652					eval jointest 8192 3
653				}
654			}
655			join6 {
656				if { $display } { puts "eval jointest 512 3" }
657				if { $run } {
658					check_handles
659					eval jointest 512 3
660				}
661			}
662			partition {
663				foreach method { btree hash } {
664					foreach test "$test_names(recd)\
665					    $test_names(test)" {
666						run_range_partition\
667						    $test $method $display $run
668						run_partition_callback\
669						    $test $method $display $run
670					}
671				}
672			}
673			recd {
674				check_handles
675				eval {run_recds all $run $display} [lrange $args 1 end]
676			}
677			repmgr {
678				set tindex [lsearch $test_names(repmgr) $starttest]
679				if { $tindex == -1 } {
680					set tindex 0
681				}
682				set rlist [lrange $test_names(repmgr) $tindex end]
683				foreach test $rlist {
684					run_test $test $display $run
685				}
686			}
687			rep {
688				r rep_subset $starttest
689			}
690			# To run a subset of the complete rep tests, use
691			# rep_subset, which randomly picks an access type to
692			# use, and randomly picks whether to open envs with
693			# the -recover flag.
694			rep_subset {
695				if  { [is_partition_callback $args] == 1 } {
696					set nodump 1
697				} else {
698					set nodump 0
699				}
700				berkdb srand $rand_init
701				set tindex [lsearch $test_names(rep) $starttest]
702				if { $tindex == -1 } {
703					set tindex 0
704				}
705				set rlist [lrange $test_names(rep) $tindex end]
706				foreach test $rlist {
707					set random_recopt \
708					    [berkdb random_int 0 1]
709					if { $random_recopt == 1 } {
710						set test_recopts "-recover"
711					} else {
712						set test_recopts {""}
713					}
714
715					set method_list \
716					    [find_valid_methods $test]
717					set list_length \
718					    [expr [llength $method_list] - 1]
719					set method_index \
720					    [berkdb random_int 0 $list_length]
721					set rand_method \
722					    [lindex $method_list $method_index]
723
724					if { $display } {
725						puts "eval $test $rand_method; \
726						    verify_dir \
727						    $testdir \"\" 1 0 $nodump; \
728						    salvage_dir $testdir"
729					}
730					if { $run } {
731				 		check_handles
732						eval $test $rand_method
733						verify_dir $testdir "" 1 0 $nodump
734						salvage_dir $testdir
735					}
736				}
737				if { $one_test == "ALL" } {
738					if { $display } {
739						#puts "basic_db_reptest"
740						#puts "basic_db_reptest 1"
741					}
742					if { $run } {
743						#basic_db_reptest
744						#basic_db_reptest 1
745					}
746				}
747				set test_recopts { "-recover" "" }
748			}
749			rep_complete {
750				set tindex [lsearch $test_names(rep) $starttest]
751				if { $tindex == -1 } {
752					set tindex 0
753				}
754				set rlist [lrange $test_names(rep) $tindex end]
755				foreach test $rlist {
756					run_test $test $display $run
757				}
758				if { $one_test == "ALL" } {
759					if { $display } {
760						#puts "basic_db_reptest"
761						#puts "basic_db_reptest 1"
762					}
763					if { $run } {
764						#basic_db_reptest
765						#basic_db_reptest 1
766					}
767				}
768			}
769			repmethod {
770				# We seed the random number generator here
771				# instead of in run_repmethod so that we
772				# aren't always reusing the first few
773				# responses from random_int.
774				#
775				berkdb srand $rand_init
776				foreach sub { test sdb } {
777					foreach test $test_names($sub) {
778						eval run_test run_repmethod \
779						    $display $run $test
780					}
781				}
782			}
783			rpc {
784				if { $one_test == "ALL" } {
785					if { $display } { puts "r $sub" }
786					global BAT EXE rpc_svc svc_list
787					global rpc_svc svc_list is_je_test
788					set old_rpc_src $rpc_svc
789					foreach rpc_svc $svc_list {
790						if { $rpc_svc == "berkeley_dbje_svc" } {
791							set old_util_path $util_path
792							set util_path $je_root/dist
793							set is_je_test 1
794						}
795
796						if { !$run || \
797				      ![file exist $util_path/$rpc_svc$BAT] || \
798				      ![file exist $util_path/$rpc_svc$EXE] } {
799							continue
800						}
801
802						run_subsystem rpc
803						if { [catch {run_rpcmethod -txn} ret] != 0 } {
804							puts $ret
805						}
806
807						if { $is_je_test } {
808							check_handles
809							eval run_rpcmethod -btree
810							verify_dir $testdir "" 1
811							salvage_dir $testdir
812						} else {
813							run_test run_rpcmethod $display $run
814						}
815
816						if { $is_je_test } {
817							set util_path $old_util_path
818							set is_je_test 0
819						}
820
821					}
822					set rpc_svc $old_rpc_src
823				}
824			}
825			sec {
826				# Skip secure mode tests if release
827				# does not support encryption.
828				if { $has_crypto == 0 } {
829					return
830				}
831				if { $display } {
832					run_subsystem $sub 1 0
833				}
834				if { $run } {
835					run_subsystem $sub 0 1
836				}
837			}
838			secmethod {
839				# Skip secure mode tests if release
840				# does not support encryption.
841				if { $has_crypto == 0 } {
842					return
843				}
844				foreach test $test_names(test) {
845					eval run_test run_secmethod \
846					    $display $run $test
847					eval run_test run_secenv \
848					    $display $run $test
849				}
850			}
851			sdb {
852				if { $one_test == "ALL" } {
853					if { $display } {
854						run_subsystem sdbtest 1 0
855					}
856					if { $run } {
857						run_subsystem sdbtest 0 1
858					}
859				}
860				foreach test $test_names(sdb) {
861					eval run_test $test $display $run
862				}
863			}
864			sindex {
865				if { $one_test == "ALL" } {
866					if { $display } {
867						sindex 1 0
868						sijoin 1 0
869					}
870					if { $run } {
871						sindex 0 1
872						sijoin 0 1
873					}
874				}
875			}
876			btree -
877			rbtree -
878			hash -
879			iqueue -
880			iqueueext -
881			queue -
882			queueext -
883			recno -
884			frecno -
885			rrecno {
886				foreach test $test_names(test) {
887					eval run_method [lindex $args 0] $test \
888					    $display $run stdout [lrange $args 1 end]
889				}
890			}
891
892			default {
893				error \
894				    "FAIL:[timestamp] r: $args: unknown command"
895			}
896		}
897		flush stdout
898		flush stderr
899	} res] != 0} {
900		global errorInfo;
901		set fnl [string first "\n" $errorInfo]
902		set theError [string range $errorInfo 0 [expr $fnl - 1]]
903		if {[string first FAIL $errorInfo] == -1} {
904			error "FAIL:[timestamp] r: $args: $theError"
905		} else {
906			error $theError;
907		}
908	}
909}
910
911proc run_subsystem { sub { display 0 } { run 1} } {
912	global test_names
913
914	if { [info exists test_names($sub)] != 1 } {
915		puts stderr "Subsystem $sub has no tests specified in\
916		    testparams.tcl; skipping."
917		return
918	}
919	foreach test $test_names($sub) {
920		if { $display } {
921			puts "eval $test"
922		}
923		if { $run } {
924			check_handles
925			if {[catch {eval $test} ret] != 0 } {
926				puts "FAIL: run_subsystem: $sub $test: \
927				    $ret"
928			}
929		}
930	}
931}
932
933proc run_test { test {display 0} {run 1} args } {
934	source ./include.tcl
935	global valid_methods
936
937	foreach method $valid_methods {
938		if { $display } {
939			puts "eval $test -$method $args; \
940			    verify_dir $testdir \"\" 1; \
941			    salvage_dir $testdir"
942		}
943		if  { [is_partition_callback $args] == 1 } {
944			set nodump 1
945		} else {
946			set nodump 0
947		}
948		if { $run } {
949	 		check_handles
950			eval {$test -$method} $args
951			verify_dir $testdir "" 1 0 $nodump
952			salvage_dir $testdir
953		}
954	}
955}
956
957proc run_method { method test {display 0} {run 1} \
958    { outfile stdout } args } {
959	global __debug_on
960	global __debug_print
961	global __debug_test
962	global test_names
963	global parms
964	source ./include.tcl
965
966	if  { [is_partition_callback $args] == 1 } {
967		set nodump  1
968	} else {
969		set nodump  0
970	}
971
972	if {[catch {
973		if { $display } {
974			puts -nonewline $outfile "eval \{ $test \} $method"
975			puts -nonewline $outfile " $parms($test) { $args }"
976			puts -nonewline $outfile " ; verify_dir $testdir \"\" 1 0 $nodump"
977			puts $outfile " ; salvage_dir $testdir"
978		}
979		if { $run } {
980			check_handles $outfile
981			puts $outfile "[timestamp]"
982			eval {$test} $method $parms($test) $args
983			if { $__debug_print != 0 } {
984				puts $outfile ""
985			}
986			# Verify all databases the test leaves behind
987			verify_dir $testdir "" 1 0 $nodump
988			if { $__debug_on != 0 } {
989				debug $__debug_test
990			}
991			salvage_dir $testdir
992		}
993		flush stdout
994		flush stderr
995	} res] != 0} {
996		global errorInfo;
997
998		set fnl [string first "\n" $errorInfo]
999		set theError [string range $errorInfo 0 [expr $fnl - 1]]
1000		if {[string first FAIL $errorInfo] == -1} {
1001			error "FAIL:[timestamp]\
1002			    run_method: $method $test: $theError"
1003		} else {
1004			error $theError;
1005		}
1006	}
1007}
1008
1009proc run_rpcmethod { method {largs ""} } {
1010	global __debug_on
1011	global __debug_print
1012	global __debug_test
1013	global rpc_tests
1014	global parms
1015	global is_envmethod
1016	global rpc_svc
1017	source ./include.tcl
1018
1019	puts "run_rpcmethod: $method $largs using $rpc_svc"
1020
1021	set save_largs $largs
1022	set dpid [rpc_server_start]
1023	puts "\tRun_rpcmethod.a: started server, pid $dpid"
1024	remote_cleanup $rpc_server $rpc_testdir $testdir
1025
1026	set home [file tail $rpc_testdir]
1027
1028	set is_envmethod 1
1029	set use_txn 0
1030	if { [string first "txn" $method] != -1 } {
1031		set use_txn 1
1032	}
1033	if { $use_txn == 1 } {
1034		set ntxns 32
1035		set i 1
1036		check_handles
1037		remote_cleanup $rpc_server $rpc_testdir $testdir
1038		set env [eval {berkdb_env -create -mode 0644 -home $home \
1039		    -server $rpc_server -client_timeout 10000} -txn]
1040		error_check_good env_open [is_valid_env $env] TRUE
1041
1042		set stat [catch {eval txn001_suba $ntxns $env} res]
1043		if { $stat == 0 } {
1044			set stat [catch {eval txn001_subb $ntxns $env} res]
1045		}
1046		set stat [catch {eval txn003} res]
1047		error_check_good envclose [$env close] 0
1048	} else {
1049		foreach test $rpc_tests($rpc_svc) {
1050			set stat [catch {
1051				check_handles
1052				remote_cleanup $rpc_server $rpc_testdir $testdir
1053				#
1054				# Set server cachesize to 128Mb.  Otherwise
1055				# some tests won't fit (like test084 -btree).
1056				#
1057				set env [eval {berkdb_env -create -mode 0644 \
1058				    -home $home -server $rpc_server \
1059				    -client_timeout 10000 \
1060				    -cachesize {0 134217728 1}}]
1061				error_check_good env_open \
1062				    [is_valid_env $env] TRUE
1063				set largs $save_largs
1064				append largs " -env $env "
1065
1066				puts "[timestamp]"
1067				puts "Running test $test with RPC service $rpc_svc"
1068				puts "eval $test $method $parms($test) $largs"
1069				eval $test $method $parms($test) $largs
1070				if { $__debug_print != 0 } {
1071					puts ""
1072				}
1073				if { $__debug_on != 0 } {
1074					debug $__debug_test
1075				}
1076				flush stdout
1077				flush stderr
1078				error_check_good envclose [$env close] 0
1079				set env ""
1080			} res]
1081
1082			if { $stat != 0} {
1083				global errorInfo;
1084
1085				puts "$res"
1086
1087				set fnl [string first "\n" $errorInfo]
1088				set theError [string range $errorInfo 0 [expr $fnl - 1]]
1089				if {[string first FAIL $errorInfo] == -1} {
1090					puts "FAIL:[timestamp]\
1091					    run_rpcmethod: $method $test: $errorInfo"
1092				} else {
1093					puts $theError;
1094				}
1095
1096				catch { $env close } ignore
1097				set env ""
1098				tclkill $dpid
1099				set dpid [rpc_server_start]
1100			}
1101		}
1102	}
1103	set is_envmethod 0
1104	tclkill $dpid
1105}
1106
1107proc run_rpcnoserver { method {largs ""} } {
1108	global __debug_on
1109	global __debug_print
1110	global __debug_test
1111	global test_names
1112	global parms
1113	global is_envmethod
1114	source ./include.tcl
1115
1116	puts "run_rpcnoserver: $method $largs"
1117
1118	set save_largs $largs
1119	remote_cleanup $rpc_server $rpc_testdir $testdir
1120	set home [file tail $rpc_testdir]
1121
1122	set is_envmethod 1
1123	set use_txn 0
1124	if { [string first "txn" $method] != -1 } {
1125		set use_txn 1
1126	}
1127	if { $use_txn == 1 } {
1128		set ntxns 32
1129		set i 1
1130		check_handles
1131		remote_cleanup $rpc_server $rpc_testdir $testdir
1132		set env [eval {berkdb_env -create -mode 0644 -home $home \
1133		    -server $rpc_server -client_timeout 10000} -txn]
1134		error_check_good env_open [is_valid_env $env] TRUE
1135
1136		set stat [catch {eval txn001_suba $ntxns $env} res]
1137		if { $stat == 0 } {
1138			set stat [catch {eval txn001_subb $ntxns $env} res]
1139		}
1140		error_check_good envclose [$env close] 0
1141	} else {
1142		set stat [catch {
1143			foreach test $test_names {
1144				check_handles
1145				if { [info exists parms($test)] != 1 } {
1146					puts stderr "$test disabled in \
1147					    testparams.tcl; skipping."
1148					continue
1149				}
1150				remote_cleanup $rpc_server $rpc_testdir $testdir
1151				#
1152				# Set server cachesize to 1Mb.  Otherwise some
1153				# tests won't fit (like test084 -btree).
1154				#
1155				set env [eval {berkdb_env -create -mode 0644 \
1156				    -home $home -server $rpc_server \
1157				    -client_timeout 10000 \
1158				    -cachesize {0 1048576 1} }]
1159				error_check_good env_open \
1160				    [is_valid_env $env] TRUE
1161				append largs " -env $env "
1162
1163				puts "[timestamp]"
1164				eval $test $method $parms($test) $largs
1165				if { $__debug_print != 0 } {
1166					puts ""
1167				}
1168				if { $__debug_on != 0 } {
1169					debug $__debug_test
1170				}
1171				flush stdout
1172				flush stderr
1173				set largs $save_largs
1174				error_check_good envclose [$env close] 0
1175			}
1176		} res]
1177	}
1178	if { $stat != 0} {
1179		global errorInfo;
1180
1181		set fnl [string first "\n" $errorInfo]
1182		set theError [string range $errorInfo 0 [expr $fnl - 1]]
1183		if {[string first FAIL $errorInfo] == -1} {
1184			error "FAIL:[timestamp]\
1185			    run_rpcnoserver: $method $i: $theError"
1186		} else {
1187			error $theError;
1188		}
1189	set is_envmethod 0
1190	}
1191
1192}
1193
1194# Run a testNNN or recdNNN test with range partitioning.
1195proc run_range_partition { test method {display 0} {run 1}\
1196    {outfile stdout} args } {
1197
1198	# The only allowed access method for range partitioning is btree.
1199	if { [is_btree $method] == 0 } {
1200		if { $display == 0 } {
1201			puts "Skipping range partition\
1202			    tests for method $method"
1203		}
1204		return
1205	}
1206
1207	# If we've passed in explicit partitioning args, use them;
1208	# otherwise set them.  This particular selection hits some
1209	# interesting cases where we set the key to "key".
1210	set largs $args
1211	if { [is_partitioned $args] == 0 } {
1212		lappend largs  -partition {ab cd key key1 zzz}
1213	}
1214
1215	if { [string first recd $test] == 0 } {
1216		eval {run_recd $method $test $run $display} $largs
1217	} elseif { [string first test $test] == 0 } {
1218		eval {run_method $method $test $display $run $outfile} $largs
1219	} else {
1220		puts "Skipping test $test with range partitioning."
1221	}
1222}
1223
1224# Run a testNNN or recdNNN test with partition callbacks.
1225proc run_partition_callback { test method {display 0} {run 1}\
1226    {outfile stdout} args } {
1227
1228	# The only allowed access methods are btree and hash.
1229	if { [is_btree $method] == 0 && [is_hash $method] == 0 } {
1230		if { $display == 0 } {
1231			puts "Skipping partition callback tests\
1232			    for method $method"
1233		}
1234		return
1235	}
1236
1237	# If we've passed in explicit partitioning args, use them;
1238	# otherwise set them.
1239	set largs $args
1240	if { [is_partition_callback $args] == 0 } {
1241		lappend largs  -partition_callback 5 part
1242	}
1243
1244	if { [string first recd $test] == 0 } {
1245		eval {run_recd $method $test $run $display} $largs
1246	} elseif { [string first test $test] == 0 } {
1247		eval {run_method $method $test $display $run $outfile} $largs
1248	} else {
1249		puts "Skipping test $test with partition callbacks."
1250	}
1251}
1252
1253#
1254# Run method tests for btree only using compression.
1255#
1256proc run_compressed { method test {display 0} {run 1} \
1257    { outfile stdout } args } {
1258
1259	if { [is_btree $method] == 0 } {
1260		puts "Skipping compression test for method $method."
1261		return
1262	}
1263
1264	set largs $args
1265	append largs " -compress "
1266	eval run_method $method $test $display $run $outfile $largs
1267}
1268
1269#
1270# Run method tests in secure mode.
1271#
1272proc run_secmethod { method test {display 0} {run 1} \
1273    { outfile stdout } args } {
1274	global passwd
1275	global has_crypto
1276
1277	# Skip secure mode tests if release does not support encryption.
1278	if { $has_crypto == 0 } {
1279		return
1280	}
1281
1282	set largs $args
1283	append largs " -encryptaes $passwd "
1284	eval run_method $method $test $display $run $outfile $largs
1285}
1286
1287#
1288# Run method tests each in its own, new secure environment.
1289#
1290proc run_secenv { method test {largs ""} } {
1291	global __debug_on
1292	global __debug_print
1293	global __debug_test
1294	global is_envmethod
1295	global has_crypto
1296	global test_names
1297	global parms
1298	global passwd
1299	source ./include.tcl
1300
1301	# Skip secure mode tests if release does not support encryption.
1302	if { $has_crypto == 0 } {
1303		return
1304	}
1305
1306	puts "run_secenv: $method $test $largs"
1307
1308	set save_largs $largs
1309	env_cleanup $testdir
1310	set is_envmethod 1
1311	set stat [catch {
1312		check_handles
1313		set env [eval {berkdb_env -create -mode 0644 -home $testdir \
1314		    -encryptaes $passwd -pagesize 512 -cachesize {0 4194304 1}}]
1315		error_check_good env_open [is_valid_env $env] TRUE
1316		append largs " -env $env "
1317
1318		puts "[timestamp]"
1319		if { [info exists parms($test)] != 1 } {
1320			puts stderr "$test disabled in\
1321			    testparams.tcl; skipping."
1322			continue
1323		}
1324
1325		#
1326		# Run each test multiple times in the secure env.
1327		# Once with a secure env + clear database
1328		# Once with a secure env + secure database
1329		#
1330		eval $test $method $parms($test) $largs
1331		append largs " -encrypt "
1332		eval $test $method $parms($test) $largs
1333
1334		if { $__debug_print != 0 } {
1335			puts ""
1336		}
1337		if { $__debug_on != 0 } {
1338			debug $__debug_test
1339		}
1340		flush stdout
1341		flush stderr
1342		set largs $save_largs
1343		error_check_good envclose [$env close] 0
1344		error_check_good envremove [berkdb envremove \
1345		    -home $testdir -encryptaes $passwd] 0
1346	} res]
1347	if { $stat != 0} {
1348		global errorInfo;
1349
1350		set fnl [string first "\n" $errorInfo]
1351		set theError [string range $errorInfo 0 [expr $fnl - 1]]
1352		if {[string first FAIL $errorInfo] == -1} {
1353			error "FAIL:[timestamp]\
1354			    run_secenv: $method $test: $theError"
1355		} else {
1356			error $theError;
1357		}
1358	set is_envmethod 0
1359	}
1360
1361}
1362
1363#
1364# Run replication method tests in master and client env.
1365#
1366proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
1367    {do_sec 0} {do_oob 0} {largs "" } } {
1368	source ./include.tcl
1369	if { $is_windows9x_test == 1 } {
1370		puts "Skipping replication test on Win 9x platform."
1371		return
1372	}
1373
1374	global __debug_on
1375	global __debug_print
1376	global __debug_test
1377	global is_envmethod
1378	global parms
1379	global passwd
1380	global has_crypto
1381
1382	puts "run_reptest \
1383	    $method $test $droppct $nclients $do_del $do_sec $do_oob $largs"
1384
1385	env_cleanup $testdir
1386	set is_envmethod 1
1387	set stat [catch {
1388		if { $do_sec && $has_crypto } {
1389			set envargs "-encryptaes $passwd"
1390			append largs " -encrypt "
1391		} else {
1392			set envargs ""
1393		}
1394		check_handles
1395		#
1396		# This will set up the master and client envs
1397		# and will return us the args to pass to the
1398		# test.
1399
1400		set largs [repl_envsetup \
1401		    $envargs $largs $test $nclients $droppct $do_oob]
1402
1403		puts "[timestamp]"
1404		if { [info exists parms($test)] != 1 } {
1405			puts stderr "$test disabled in\
1406			    testparams.tcl; skipping."
1407			continue
1408		}
1409
1410		puts -nonewline \
1411		    "Repl: $test: dropping $droppct%, $nclients clients "
1412		if { $do_del } {
1413			puts -nonewline " with delete verification;"
1414		} else {
1415			puts -nonewline " no delete verification;"
1416		}
1417		if { $do_sec } {
1418			puts -nonewline " with security;"
1419		} else {
1420			puts -nonewline " no security;"
1421		}
1422		if { $do_oob } {
1423			puts -nonewline " with out-of-order msgs;"
1424		} else {
1425			puts -nonewline " no out-of-order msgs;"
1426		}
1427		puts ""
1428
1429		eval $test $method $parms($test) $largs
1430
1431		if { $__debug_print != 0 } {
1432			puts ""
1433		}
1434		if { $__debug_on != 0 } {
1435			debug $__debug_test
1436		}
1437		flush stdout
1438		flush stderr
1439		repl_envprocq $test $nclients $do_oob
1440		repl_envver0 $test $method $nclients
1441		if { $do_del } {
1442			repl_verdel $test $method $nclients
1443		}
1444		repl_envclose $test $envargs
1445	} res]
1446	if { $stat != 0} {
1447		global errorInfo;
1448
1449		set fnl [string first "\n" $errorInfo]
1450		set theError [string range $errorInfo 0 [expr $fnl - 1]]
1451		if {[string first FAIL $errorInfo] == -1} {
1452			error "FAIL:[timestamp]\
1453			    run_reptest: $method $test: $theError"
1454		} else {
1455			error $theError;
1456		}
1457	}
1458	set is_envmethod 0
1459}
1460
1461#
1462# Run replication method tests in master and client env.
1463#
1464proc run_repmethod { method test {numcl 0} {display 0} {run 1} \
1465    {outfile stdout} {largs ""} } {
1466	source ./include.tcl
1467	if { $is_windows9x_test == 1 } {
1468		puts "Skipping replication test on Win 9x platform."
1469		return
1470	}
1471
1472	global __debug_on
1473	global __debug_print
1474	global __debug_test
1475	global is_envmethod
1476	global test_names
1477	global parms
1478	global has_crypto
1479	global passwd
1480
1481	set save_largs $largs
1482	env_cleanup $testdir
1483
1484	# Use an array for number of clients because we really don't
1485	# want to evenly-weight all numbers of clients.  Favor smaller
1486	# numbers but test more clients occasionally.
1487	set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
1488	set drop_len [expr [llength $drop_list] - 1]
1489	set client_list { 1 1 2 1 1 1 2 2 3 1 }
1490	set cl_len [expr [llength $client_list] - 1]
1491
1492	if { $numcl == 0 } {
1493		set clindex [berkdb random_int 0 $cl_len]
1494		set nclients [lindex $client_list $clindex]
1495	} else {
1496		set nclients $numcl
1497	}
1498	set drindex [berkdb random_int 0 $drop_len]
1499	set droppct [lindex $drop_list $drindex]
1500
1501	# Do not drop messages on Windows.  Since we can't set
1502	# re-request times with less than millisecond precision,
1503	# dropping messages will cause test failures.
1504	if { $is_windows_test == 1 } {
1505		set droppct 0
1506	}
1507
1508 	set do_sec [berkdb random_int 0 1]
1509	set do_oob [berkdb random_int 0 1]
1510	set do_del [berkdb random_int 0 1]
1511
1512	if { $display == 1 } {
1513		puts $outfile "eval run_reptest $method $test $droppct \
1514		    $nclients $do_del $do_sec $do_oob $largs"
1515	}
1516	if { $run == 1 } {
1517		run_reptest $method $test $droppct $nclients $do_del \
1518		    $do_sec $do_oob $largs
1519	}
1520}
1521
1522#
1523# Run method tests, each in its own, new environment.  (As opposed to
1524# run_envmethod1 which runs all the tests in a single environment.)
1525#
1526proc run_envmethod { method test {display 0} {run 1} {outfile stdout} \
1527    { largs "" } } {
1528	global __debug_on
1529	global __debug_print
1530	global __debug_test
1531	global is_envmethod
1532	global test_names
1533	global parms
1534	source ./include.tcl
1535
1536	set save_largs $largs
1537	set envargs ""
1538
1539	# Enlarge the logging region by default - sdb004 needs this because
1540	# it uses very long subdb names, and the names are stored in the
1541	# env region.
1542	set logargs " -log_regionmax 2057152 "
1543
1544	# Enlarge the cache by default - some compaction tests need it.
1545	set cacheargs "-cachesize {0 4194304 1} -pagesize 512"
1546	env_cleanup $testdir
1547
1548	if { $display == 1 } {
1549		puts $outfile "eval run_envmethod $method \
1550		    $test 0 1 stdout $largs"
1551	}
1552
1553	# To run a normal test using system memory, call run_envmethod
1554	# with the flag -shm.
1555	set sindex [lsearch -exact $largs "-shm"]
1556	if { $sindex >= 0 } {
1557		if { [mem_chk " -system_mem -shm_key 1 "] == 1 } {
1558			break
1559		} else {
1560			append envargs " -system_mem -shm_key 1 "
1561			set largs [lreplace $largs $sindex $sindex]
1562		}
1563	}
1564
1565	set sindex [lsearch -exact $largs "-log_max"]
1566	if { $sindex >= 0 } {
1567		append envargs " -log_max 100000 "
1568		set largs [lreplace $largs $sindex $sindex]
1569	}
1570
1571	# Test for -thread option and pass to berkdb_env open.  Leave in
1572	# $largs because -thread can also be passed to an individual
1573	# test as an arg.  Double the number of lockers because a threaded
1574	# env requires more than an ordinary env.
1575	if { [lsearch -exact $largs "-thread"] != -1 } {
1576		append envargs " -thread -lock_max_lockers 2000 "
1577	}
1578
1579	# Test for -alloc option and pass to berkdb_env open only.
1580	# Remove from largs because -alloc is not an allowed test arg.
1581	set aindex [lsearch -exact $largs "-alloc"]
1582	if { $aindex >= 0 } {
1583		append envargs " -alloc "
1584		set largs [lreplace $largs $aindex $aindex]
1585	}
1586
1587	# We raise the number of locks and objects - there are a few
1588	# compaction tests that require a large number.
1589	set lockargs " -lock_max_locks 40000 -lock_max_objects 20000 "
1590
1591	if { $run == 1 } {
1592		set is_envmethod 1
1593		set stat [catch {
1594			check_handles
1595			set env [eval {berkdb_env -create -txn -mode 0644 \
1596			    -home $testdir} $logargs $cacheargs $lockargs $envargs]
1597			error_check_good env_open [is_valid_env $env] TRUE
1598			append largs " -env $env "
1599
1600			puts "[timestamp]"
1601			if { [info exists parms($test)] != 1 } {
1602				puts stderr "$test disabled in\
1603				    testparams.tcl; skipping."
1604				continue
1605			}
1606			eval $test $method $parms($test) $largs
1607
1608			if { $__debug_print != 0 } {
1609				puts ""
1610			}
1611			if { $__debug_on != 0 } {
1612				debug $__debug_test
1613			}
1614			flush stdout
1615			flush stderr
1616			set largs $save_largs
1617			error_check_good envclose [$env close] 0
1618#			error_check_good envremove [berkdb envremove \
1619#			    -home $testdir] 0
1620		} res]
1621		if { $stat != 0} {
1622			global errorInfo;
1623
1624			set fnl [string first "\n" $errorInfo]
1625			set theError [string range $errorInfo 0 [expr $fnl - 1]]
1626			if {[string first FAIL $errorInfo] == -1} {
1627				error "FAIL:[timestamp]\
1628				    run_envmethod: $method $test: $theError"
1629			} else {
1630				error $theError;
1631			}
1632		}
1633		set is_envmethod 0
1634	}
1635}
1636
1637proc run_compact { method } {
1638	source ./include.tcl
1639	for {set tnum 111} {$tnum <= 115} {incr tnum} {
1640		run_envmethod $method test$tnum 0 1 stdout -log_max
1641
1642		puts "\tTest$tnum: Test Recovery"
1643		set env1 [eval  berkdb env -create -txn \
1644		    -recover_fatal -home $testdir]
1645		error_check_good env_close [$env1 close] 0
1646		error_check_good verify_dir \
1647		    [verify_dir $testdir "" 0 0 1 ] 0
1648		puts "\tTest$tnum: Remove db and test Recovery"
1649		exec sh -c "rm -f $testdir/*.db"
1650		set env1 [eval  berkdb env -create -txn \
1651		    -recover_fatal -home $testdir]
1652		error_check_good env_close [$env1 close] 0
1653		error_check_good verify_dir \
1654		    [verify_dir $testdir "" 0 0 1 ] 0
1655	}
1656}
1657
1658proc run_recd { method test {run 1} {display 0} args } {
1659	global __debug_on
1660	global __debug_print
1661	global __debug_test
1662	global parms
1663	global test_names
1664	global log_log_record_types
1665	global gen_upgrade_log
1666	global upgrade_be
1667	global upgrade_dir
1668	global upgrade_method
1669	global upgrade_name
1670	source ./include.tcl
1671
1672	if { $run == 1 } {
1673		puts "run_recd: $method $test $parms($test) $args"
1674	}
1675	if {[catch {
1676		if { $display } {
1677			puts "eval { $test } $method $parms($test) { $args }"
1678		}
1679		if { $run } {
1680			check_handles
1681			set upgrade_method $method
1682			set upgrade_name $test
1683			puts "[timestamp]"
1684			# By redirecting stdout to stdout, we make exec
1685			# print output rather than simply returning it.
1686			# By redirecting stderr to stdout too, we make
1687			# sure everything winds up in the ALL.OUT file.
1688			set ret [catch { exec $tclsh_path << \
1689			    "source $test_path/test.tcl; \
1690			    set log_log_record_types $log_log_record_types;\
1691			    set gen_upgrade_log $gen_upgrade_log;\
1692			    set upgrade_be $upgrade_be; \
1693			    set upgrade_dir $upgrade_dir; \
1694			    set upgrade_method $upgrade_method; \
1695			    set upgrade_name $upgrade_name; \
1696			    eval { $test } $method $parms($test) {$args}" \
1697			    >&@ stdout
1698			} res]
1699
1700			# Don't die if the test failed;  we want
1701			# to just proceed.
1702			if { $ret != 0 } {
1703				puts "FAIL:[timestamp] $res"
1704			}
1705
1706			if { $__debug_print != 0 } {
1707				puts ""
1708			}
1709			if { $__debug_on != 0 } {
1710				debug $__debug_test
1711			}
1712			flush stdout
1713			flush stderr
1714		}
1715	} res] != 0} {
1716		global errorInfo;
1717
1718		set fnl [string first "\n" $errorInfo]
1719		set theError [string range $errorInfo 0 [expr $fnl - 1]]
1720		if {[string first FAIL $errorInfo] == -1} {
1721			error "FAIL:[timestamp]\
1722			    run_recd: $method: $theError"
1723		} else {
1724			error $theError;
1725		}
1726	}
1727}
1728
1729proc recds {method args} {
1730	eval {run_recds $method 1 0} $args
1731}
1732
1733proc run_recds {{run_methods "all"} {run 1} {display 0} args } {
1734	source ./include.tcl
1735	global log_log_record_types
1736	global test_names
1737	global gen_upgrade_log
1738	global encrypt
1739	global valid_methods
1740
1741	set log_log_record_types 1
1742	set run_zero 0
1743	if { $run_methods == "all" } {
1744		set run_methods  $valid_methods
1745		set run_zero 1
1746	}
1747	logtrack_init
1748
1749	# Define a small set of tests to run with log file zeroing.
1750	set zero_log_tests \
1751	    {recd001 recd002 recd003 recd004 recd005 recd006 recd007}
1752
1753	foreach method $run_methods {
1754		check_handles
1755#set test_names(recd) "recd005 recd017"
1756		foreach test $test_names(recd) {
1757			# Skip recd017 for non-crypto upgrade testing.
1758			# Run only recd017 for crypto upgrade testing.
1759			if { $gen_upgrade_log == 1 && $test == "recd017" && \
1760			    $encrypt == 0 } {
1761				puts "Skipping recd017 for non-crypto run."
1762				continue
1763			}
1764			if { $gen_upgrade_log == 1 && $test != "recd017" && \
1765			    $encrypt == 1 } {
1766				puts "Skipping $test for crypto run."
1767				continue
1768			}
1769			if { [catch {eval {run_recd $method $test $run \
1770			    $display} $args} ret ] != 0 } {
1771				puts $ret
1772			}
1773
1774			# If it's one of the chosen tests, and btree, run with
1775			# log file zeroing.
1776			set zlog_idx [lsearch -exact $zero_log_tests $test]
1777			if { $run_zero == 1 && \
1778			    $method == "btree" && $zlog_idx > -1 } {
1779				if { [catch {eval {run_recd $method $test \
1780				    $run $display -zero_log} $args} ret ] != 0 } {
1781					puts $ret
1782				}
1783			}
1784
1785			if { $gen_upgrade_log == 1 } {
1786				save_upgrade_files $testdir
1787			}
1788		}
1789	}
1790
1791	# We can skip logtrack_summary during the crypto upgrade run -
1792	# it doesn't introduce any new log types.
1793	if { $run } {
1794		if { $gen_upgrade_log == 0 || $encrypt == 0 } {
1795			logtrack_summary
1796		}
1797	}
1798	set log_log_record_types 0
1799}
1800
1801# A small subset of tests to be used in conjunction with the
1802# automated builds.  Ideally these tests will cover a lot of ground
1803# but run in only 15 minutes or so.  You can put any test in the
1804# list of tests and it will be run all the ways that run_all
1805# runs it.
1806proc run_smoke { } {
1807	source ./include.tcl
1808	global valid_methods
1809
1810	fileremove -f SMOKE.OUT
1811
1812	set smoke_tests { \
1813	    lock001 log001 test001 test004 sdb001 sec001 rep001 txn001 }
1814
1815	# Run each test in all its permutations, and
1816	# concatenate the results in the file SMOKE.OUT.
1817	foreach test $smoke_tests {
1818		run_all $test
1819		set in [open ALL.OUT r]
1820		set out [open SMOKE.OUT a]
1821		while { [gets $in str] != -1 } {
1822			puts $out $str
1823		}
1824		close $in
1825		close $out
1826	}
1827}
1828
1829proc run_all { { testname ALL } args } {
1830	global test_names
1831	global one_test
1832	global has_crypto
1833	global valid_methods
1834	source ./include.tcl
1835
1836	fileremove -f ALL.OUT
1837
1838	set one_test $testname
1839	if { $one_test != "ALL" } {
1840		# Source testparams again to adjust test_names.
1841		source $test_path/testparams.tcl
1842	}
1843
1844	set exflgs [eval extractflags $args]
1845	set flags [lindex $exflgs 1]
1846	set display 1
1847	set run 1
1848	set am_only 0
1849	set parallel 0
1850	set nparalleltests 0
1851	set rflags {--}
1852	foreach f $flags {
1853		switch $f {
1854			m {
1855				set am_only 1
1856			}
1857			n {
1858				set display 1
1859				set run 0
1860				set rflags [linsert $rflags 0 "-n"]
1861			}
1862		}
1863	}
1864
1865	set o [open ALL.OUT a]
1866	if { $run == 1 } {
1867		puts -nonewline "Test suite run started at: "
1868		puts [clock format [clock seconds] -format "%H:%M %D"]
1869		puts [berkdb version -string]
1870
1871		puts -nonewline $o "Test suite run started at: "
1872		puts $o [clock format [clock seconds] -format "%H:%M %D"]
1873		puts $o [berkdb version -string]
1874	}
1875	close $o
1876	#
1877	# First run standard tests.  Send in a -A to let run_std know
1878	# that it is part of the "run_all" run, so that it doesn't
1879	# print out start/end times.
1880	#
1881	lappend args -A
1882	eval {run_std} $one_test $args
1883
1884	set test_pagesizes [get_test_pagesizes]
1885	set args [lindex $exflgs 0]
1886	set save_args $args
1887
1888	foreach pgsz $test_pagesizes {
1889		set args $save_args
1890		append args " -pagesize $pgsz -chksum"
1891		if { $am_only == 0 } {
1892			# Run recovery tests.
1893			#
1894			# XXX These don't actually work at multiple pagesizes;
1895			# disable them for now.
1896			#
1897			# XXX These too are broken into separate tclsh
1898			# instantiations so we don't require so much
1899			# memory, but I think it's cleaner
1900			# and more useful to do it down inside proc r than here,
1901			# since "r recd" gets done a lot and needs to work.
1902			#
1903			# XXX See comment in run_std for why this only directs
1904			# stdout and not stderr.  Don't worry--the right stuff
1905			# happens.
1906			#puts "Running recovery tests with pagesize $pgsz"
1907			#if [catch {exec $tclsh_path \
1908			#    << "source $test_path/test.tcl; \
1909			#    r $rflags recd $args" \
1910			#    2>@ stderr >> ALL.OUT } res] {
1911			#	set o [open ALL.OUT a]
1912			#	puts $o "FAIL: recd test:"
1913			#	puts $o $res
1914			#	close $o
1915			#}
1916		}
1917
1918		# Access method tests.
1919		# Run subdb tests with varying pagesizes too.
1920		# XXX
1921		# Broken up into separate tclsh instantiations so
1922		# we don't require so much memory.
1923		foreach method $valid_methods {
1924			puts "Running $method tests with pagesize $pgsz"
1925			foreach sub {test sdb si} {
1926				foreach test $test_names($sub) {
1927					if { $run == 0 } {
1928						set o [open ALL.OUT a]
1929						eval {run_method -$method \
1930						    $test $display $run $o} \
1931						    $args
1932						close $o
1933					}
1934					if { $run } {
1935						if [catch {exec $tclsh_path << \
1936						    "global one_test; \
1937						    set one_test $one_test; \
1938						    source $test_path/test.tcl; \
1939						    eval {run_method -$method \
1940						    $test $display $run \
1941						    stdout} $args" \
1942						    >>& ALL.OUT } res] {
1943							set o [open ALL.OUT a]
1944							puts $o "FAIL: \
1945							    -$method $test: $res"
1946							close $o
1947						}
1948					}
1949				}
1950			}
1951		}
1952	}
1953	set args $save_args
1954	#
1955	# Run access method tests at default page size in one env.
1956	#
1957	foreach method $valid_methods {
1958		puts "Running $method tests in a txn env"
1959		foreach sub {test sdb si} {
1960			foreach test $test_names($sub) {
1961				if { $run == 0 } {
1962					set o [open ALL.OUT a]
1963					run_envmethod -$method $test $display \
1964					    $run $o $args
1965					close $o
1966				}
1967				if { $run } {
1968					if [catch {exec $tclsh_path << \
1969					    "global one_test; \
1970					    set one_test $one_test; \
1971					    source $test_path/test.tcl; \
1972					    run_envmethod -$method $test \
1973				  	    $display $run stdout $args" \
1974					    >>& ALL.OUT } res] {
1975						set o [open ALL.OUT a]
1976						puts $o "FAIL: run_envmethod \
1977						    $method $test: $res"
1978						close $o
1979					}
1980				}
1981			}
1982		}
1983	}
1984	#
1985	# Run access method tests at default page size in thread-enabled env.
1986	# We're not truly running threaded tests, just testing the interface.
1987	#
1988	foreach method $valid_methods {
1989		puts "Running $method tests in a threaded txn env"
1990		foreach sub {test sdb si} {
1991			foreach test $test_names($sub) {
1992				if { $run == 0 } {
1993					set o [open ALL.OUT a]
1994					eval {run_envmethod -$method $test \
1995					    $display $run $o -thread}
1996					close $o
1997				}
1998				if { $run } {
1999					if [catch {exec $tclsh_path << \
2000					    "global one_test; \
2001					    set one_test $one_test; \
2002					    source $test_path/test.tcl; \
2003					    eval {run_envmethod -$method $test \
2004				  	    $display $run stdout -thread}" \
2005					    >>& ALL.OUT } res] {
2006						set o [open ALL.OUT a]
2007						puts $o "FAIL: run_envmethod \
2008						    $method $test -thread: $res"
2009						close $o
2010					}
2011				}
2012			}
2013		}
2014	}
2015	#
2016	# Run access method tests at default page size with -alloc enabled.
2017	#
2018	foreach method $valid_methods {
2019		puts "Running $method tests in an env with -alloc"
2020		foreach sub {test sdb si} {
2021			foreach test $test_names($sub) {
2022				if { $run == 0 } {
2023					set o [open ALL.OUT a]
2024					eval {run_envmethod -$method $test \
2025					    $display $run $o -alloc}
2026					close $o
2027				}
2028				if { $run } {
2029					if [catch {exec $tclsh_path << \
2030					    "global one_test; \
2031					    set one_test $one_test; \
2032					    source $test_path/test.tcl; \
2033					    eval {run_envmethod -$method $test \
2034				  	    $display $run stdout -alloc}" \
2035					    >>& ALL.OUT } res] {
2036						set o [open ALL.OUT a]
2037						puts $o "FAIL: run_envmethod \
2038						    $method $test -alloc: $res"
2039						close $o
2040					}
2041				}
2042			}
2043		}
2044	}
2045
2046	# Run standard access method tests under replication.
2047	#
2048	set test_list [list {"testNNN under replication"	"repmethod"}]
2049
2050	# If we're on Windows, Linux, FreeBSD, or Solaris, run the
2051	# bigfile tests.  These create files larger than 4 GB.
2052	if { $is_freebsd_test == 1 || $is_linux_test == 1 || \
2053	    $is_sunos_test == 1 || $is_windows_test == 1 } {
2054		lappend test_list {"big files"	"bigfile"}
2055	}
2056
2057	# If release supports encryption, run security tests.
2058	#
2059	if { $has_crypto == 1 } {
2060		lappend test_list {"testNNN with security"	"secmethod"}
2061	}
2062	#
2063	# If configured for RPC, then run rpc tests too.
2064	#
2065	if { [file exists ./berkeley_db_svc] ||
2066	     [file exists ./berkeley_db_cxxsvc] ||
2067	     [file exists ./berkeley_db_javasvc] } {
2068		lappend test_list {"RPC"	"rpc"}
2069	}
2070
2071	foreach pair $test_list {
2072		set msg [lindex $pair 0]
2073		set cmd [lindex $pair 1]
2074		puts "Running $msg tests"
2075		if [catch {exec $tclsh_path << \
2076		    "global one_test; set one_test $one_test; \
2077		    source $test_path/test.tcl; \
2078		    r $rflags $cmd $args" >>& ALL.OUT } res] {
2079			set o [open ALL.OUT a]
2080			puts $o "FAIL: $cmd test: $res"
2081			close $o
2082		}
2083	}
2084
2085	# If not actually running, no need to check for failure.
2086	if { $run == 0 } {
2087		return
2088	}
2089
2090	set failed 0
2091	set o [open ALL.OUT r]
2092	while { [gets $o line] >= 0 } {
2093		if { [regexp {^FAIL} $line] != 0 } {
2094			set failed 1
2095		}
2096	}
2097	close $o
2098	set o [open ALL.OUT a]
2099	if { $failed == 0 } {
2100		puts "Regression Tests Succeeded"
2101		puts $o "Regression Tests Succeeded"
2102	} else {
2103		puts "Regression Tests Failed; see ALL.OUT for log"
2104		puts $o "Regression Tests Failed"
2105	}
2106
2107	puts -nonewline "Test suite run completed at: "
2108	puts [clock format [clock seconds] -format "%H:%M %D"]
2109	puts -nonewline $o "Test suite run completed at: "
2110	puts $o [clock format [clock seconds] -format "%H:%M %D"]
2111	close $o
2112}
2113
2114proc run_all_new { { testname ALL } args } {
2115	global test_names
2116	global one_test
2117	global has_crypto
2118	global valid_methods
2119	source ./include.tcl
2120
2121	fileremove -f ALL.OUT
2122
2123	set one_test $testname
2124	if { $one_test != "ALL" } {
2125		# Source testparams again to adjust test_names.
2126		source $test_path/testparams.tcl
2127	}
2128
2129	set exflgs [eval extractflags $args]
2130	set flags [lindex $exflgs 1]
2131	set display 1
2132	set run 1
2133	set am_only 0
2134	set parallel 0
2135	set nparalleltests 0
2136	set rflags {--}
2137	foreach f $flags {
2138		switch $f {
2139			m {
2140				set am_only 1
2141			}
2142			n {
2143				set display 1
2144				set run 0
2145				set rflags [linsert $rflags 0 "-n"]
2146			}
2147		}
2148	}
2149
2150	set o [open ALL.OUT a]
2151	if { $run == 1 } {
2152		puts -nonewline "Test suite run started at: "
2153		puts [clock format [clock seconds] -format "%H:%M %D"]
2154		puts [berkdb version -string]
2155
2156		puts -nonewline $o "Test suite run started at: "
2157		puts $o [clock format [clock seconds] -format "%H:%M %D"]
2158		puts $o [berkdb version -string]
2159	}
2160	close $o
2161	#
2162	# First run standard tests.  Send in a -A to let run_std know
2163	# that it is part of the "run_all" run, so that it doesn't
2164	# print out start/end times.
2165	#
2166	lappend args -A
2167	eval {run_std} $one_test $args
2168
2169	set test_pagesizes [get_test_pagesizes]
2170	set args [lindex $exflgs 0]
2171	set save_args $args
2172
2173	#
2174	# Run access method tests at default page size in one env.
2175	#
2176	foreach method $valid_methods {
2177		puts "Running $method tests in a txn env"
2178		foreach sub {test sdb si} {
2179			foreach test $test_names($sub) {
2180				if { $run == 0 } {
2181					set o [open ALL.OUT a]
2182					run_envmethod -$method $test $display \
2183					    $run $o $args
2184					close $o
2185				}
2186				if { $run } {
2187					if [catch {exec $tclsh_path << \
2188					    "global one_test; \
2189					    set one_test $one_test; \
2190					    source $test_path/test.tcl; \
2191					    run_envmethod -$method $test \
2192				  	    $display $run stdout $args" \
2193					    >>& ALL.OUT } res] {
2194						set o [open ALL.OUT a]
2195						puts $o "FAIL: run_envmethod \
2196						    $method $test: $res"
2197						close $o
2198					}
2199				}
2200			}
2201		}
2202	}
2203	#
2204	# Run access method tests at default page size in thread-enabled env.
2205	# We're not truly running threaded tests, just testing the interface.
2206	#
2207	foreach method $valid_methods {
2208		puts "Running $method tests in a threaded txn env"
2209		set thread_tests "test001"
2210		foreach test $thread_tests {
2211			if { $run == 0 } {
2212				set o [open ALL.OUT a]
2213				eval {run_envmethod -$method $test \
2214				    $display $run $o -thread}
2215				close $o
2216			}
2217			if { $run } {
2218				if [catch {exec $tclsh_path << \
2219				    "global one_test; \
2220				    set one_test $one_test; \
2221				    source $test_path/test.tcl; \
2222				    eval {run_envmethod -$method $test \
2223			  	    $display $run stdout -thread}" \
2224				    >>& ALL.OUT } res] {
2225					set o [open ALL.OUT a]
2226					puts $o "FAIL: run_envmethod \
2227					    $method $test -thread: $res"
2228					close $o
2229				}
2230			}
2231		}
2232	}
2233	#
2234	# Run access method tests at default page size with -alloc enabled.
2235	#
2236	foreach method $valid_methods {
2237		puts "Running $method tests in an env with -alloc"
2238		set alloc_tests "test001"
2239		foreach test $alloc_tests {
2240			if { $run == 0 } {
2241				set o [open ALL.OUT a]
2242				eval {run_envmethod -$method $test \
2243				    $display $run $o -alloc}
2244				close $o
2245			}
2246			if { $run } {
2247				if [catch {exec $tclsh_path << \
2248				    "global one_test; \
2249				    set one_test $one_test; \
2250				    source $test_path/test.tcl; \
2251				    eval {run_envmethod -$method $test \
2252			  	    $display $run stdout -alloc}" \
2253				    >>& ALL.OUT } res] {
2254					set o [open ALL.OUT a]
2255					puts $o "FAIL: run_envmethod \
2256					    $method $test -alloc: $res"
2257					close $o
2258				}
2259			}
2260		}
2261	}
2262
2263	# Run standard access method tests under replication.
2264	#
2265	set test_list [list {"testNNN under replication"	"repmethod"}]
2266
2267	# If we're on Windows, Linux, FreeBSD, or Solaris, run the
2268	# bigfile tests.  These create files larger than 4 GB.
2269	if { $is_freebsd_test == 1 || $is_linux_test == 1 || \
2270	    $is_sunos_test == 1 || $is_windows_test == 1 } {
2271		lappend test_list {"big files"	"bigfile"}
2272	}
2273
2274	# If release supports encryption, run security tests.
2275	#
2276	if { $has_crypto == 1 } {
2277		lappend test_list {"testNNN with security"	"secmethod"}
2278	}
2279	#
2280	# If configured for RPC, then run rpc tests too.
2281	#
2282	if { [file exists ./berkeley_db_svc] ||
2283	     [file exists ./berkeley_db_cxxsvc] ||
2284	     [file exists ./berkeley_db_javasvc] } {
2285		lappend test_list {"RPC"	"rpc"}
2286	}
2287
2288	foreach pair $test_list {
2289		set msg [lindex $pair 0]
2290		set cmd [lindex $pair 1]
2291		puts "Running $msg tests"
2292		if [catch {exec $tclsh_path << \
2293		    "global one_test; set one_test $one_test; \
2294		    source $test_path/test.tcl; \
2295		    r $rflags $cmd $args" >>& ALL.OUT } res] {
2296			set o [open ALL.OUT a]
2297			puts $o "FAIL: $cmd test: $res"
2298			close $o
2299		}
2300	}
2301
2302	# If not actually running, no need to check for failure.
2303	if { $run == 0 } {
2304		return
2305	}
2306
2307	set failed 0
2308	set o [open ALL.OUT r]
2309	while { [gets $o line] >= 0 } {
2310		if { [regexp {^FAIL} $line] != 0 } {
2311			set failed 1
2312		}
2313	}
2314	close $o
2315	set o [open ALL.OUT a]
2316	if { $failed == 0 } {
2317		puts "Regression Tests Succeeded"
2318		puts $o "Regression Tests Succeeded"
2319	} else {
2320		puts "Regression Tests Failed; see ALL.OUT for log"
2321		puts $o "Regression Tests Failed"
2322	}
2323
2324	puts -nonewline "Test suite run completed at: "
2325	puts [clock format [clock seconds] -format "%H:%M %D"]
2326	puts -nonewline $o "Test suite run completed at: "
2327	puts $o [clock format [clock seconds] -format "%H:%M %D"]
2328	close $o
2329}
2330
2331#
2332# Run method tests in one environment.  (As opposed to run_envmethod
2333# which runs each test in its own, new environment.)
2334#
2335proc run_envmethod1 { method {display 0} {run 1} { outfile stdout } args } {
2336	global __debug_on
2337	global __debug_print
2338	global __debug_test
2339	global is_envmethod
2340	global test_names
2341	global parms
2342	source ./include.tcl
2343
2344	if { $run == 1 } {
2345		puts "run_envmethod1: $method $args"
2346	}
2347
2348	set is_envmethod 1
2349	if { $run == 1 } {
2350		check_handles
2351		env_cleanup $testdir
2352		error_check_good envremove [berkdb envremove -home $testdir] 0
2353		set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
2354 		    {-pagesize 512 -mode 0644 -home $testdir}]
2355		error_check_good env_open [is_valid_env $env] TRUE
2356		append largs " -env $env "
2357	}
2358
2359	if { $display } {
2360		# The envmethod1 tests can't be split up, since they share
2361		# an env.
2362		puts $outfile "eval run_envmethod1 $method $args"
2363	}
2364
2365	set stat [catch {
2366		foreach test $test_names(test) {
2367			if { [info exists parms($test)] != 1 } {
2368				puts stderr "$test disabled in\
2369				    testparams.tcl; skipping."
2370				continue
2371			}
2372			if { $run } {
2373				puts $outfile "[timestamp]"
2374				eval $test $method $parms($test) $largs
2375				if { $__debug_print != 0 } {
2376					puts $outfile ""
2377				}
2378				if { $__debug_on != 0 } {
2379					debug $__debug_test
2380				}
2381			}
2382			flush stdout
2383			flush stderr
2384		}
2385	} res]
2386	if { $stat != 0} {
2387		global errorInfo;
2388
2389		set fnl [string first "\n" $errorInfo]
2390		set theError [string range $errorInfo 0 [expr $fnl - 1]]
2391		if {[string first FAIL $errorInfo] == -1} {
2392			error "FAIL:[timestamp]\
2393			    run_envmethod: $method $test: $theError"
2394		} else {
2395			error $theError;
2396		}
2397	}
2398	set stat [catch {
2399		foreach test $test_names(test) {
2400			if { [info exists parms($test)] != 1 } {
2401				puts stderr "$test disabled in\
2402				    testparams.tcl; skipping."
2403				continue
2404			}
2405			if { $run } {
2406				puts $outfile "[timestamp]"
2407				eval $test $method $parms($test) $largs
2408				if { $__debug_print != 0 } {
2409					puts $outfile ""
2410				}
2411				if { $__debug_on != 0 } {
2412					debug $__debug_test
2413				}
2414			}
2415			flush stdout
2416			flush stderr
2417		}
2418	} res]
2419	if { $stat != 0} {
2420		global errorInfo;
2421
2422		set fnl [string first "\n" $errorInfo]
2423		set theError [string range $errorInfo 0 [expr $fnl - 1]]
2424		if {[string first FAIL $errorInfo] == -1} {
2425			error "FAIL:[timestamp]\
2426			    run_envmethod1: $method $test: $theError"
2427		} else {
2428			error $theError;
2429		}
2430	}
2431	if { $run == 1 } {
2432		error_check_good envclose [$env close] 0
2433		check_handles $outfile
2434	}
2435	set is_envmethod 0
2436
2437}
2438
2439# Run the secondary index tests.
2440proc sindex { {display 0} {run 1} {outfile stdout} {verbose 0} args } {
2441	global test_names
2442	global testdir
2443	global verbose_check_secondaries
2444	set verbose_check_secondaries $verbose
2445	# Standard number of secondary indices to create if a single-element
2446	# list of methods is passed into the secondary index tests.
2447	global nsecondaries
2448	set nsecondaries 2
2449
2450	# Run basic tests with a single secondary index and a small number
2451	# of keys, then again with a larger number of keys.  (Note that
2452	# we can't go above 5000, since we use two items from our
2453	# 10K-word list for each key/data pair.)
2454	foreach n { 200 5000 } {
2455		foreach pm { btree hash recno frecno queue queueext } {
2456			foreach sm { dbtree dhash ddbtree ddhash btree hash } {
2457				foreach test $test_names(si) {
2458					if { $display } {
2459						puts -nonewline $outfile \
2460						    "eval $test {\[list\
2461						    $pm $sm $sm\]} $n ;"
2462						puts -nonewline $outfile \
2463						    " verify_dir \
2464						    $testdir \"\" 1; "
2465						puts $outfile " salvage_dir \
2466						    $testdir "
2467					}
2468					if { $run } {
2469			 			check_handles $outfile
2470						eval $test \
2471						    {[list $pm $sm $sm]} $n
2472						verify_dir $testdir "" 1
2473						salvage_dir $testdir
2474					}
2475				}
2476			}
2477		}
2478	}
2479
2480	# Run tests with 20 secondaries.
2481	foreach pm { btree hash } {
2482		set methlist [list $pm]
2483		for { set j 1 } { $j <= 20 } {incr j} {
2484			# XXX this should incorporate hash after #3726
2485			if { $j % 2 == 0 } {
2486				lappend methlist "dbtree"
2487			} else {
2488				lappend methlist "ddbtree"
2489			}
2490		}
2491		foreach test $test_names(si) {
2492			if { $display } {
2493				puts "eval $test {\[list $methlist\]} 500"
2494			}
2495			if { $run } {
2496				eval $test {$methlist} 500
2497			}
2498		}
2499	}
2500}
2501
2502# Run secondary index join test.  (There's no point in running
2503# this with both lengths, the primary is unhappy for now with fixed-
2504# length records (XXX), and we need unsorted dups in the secondaries.)
2505proc sijoin { {display 0} {run 1} {outfile stdout} } {
2506	foreach pm { btree hash recno } {
2507		if { $display } {
2508			foreach sm { btree hash } {
2509				puts $outfile "eval sijointest\
2510				    {\[list $pm $sm $sm\]} 1000"
2511			}
2512			puts $outfile "eval sijointest\
2513			    {\[list $pm btree hash\]} 1000"
2514			puts $outfile "eval sijointest\
2515			    {\[list $pm hash btree\]} 1000"
2516		}
2517		if { $run } {
2518			foreach sm { btree hash } {
2519				eval sijointest {[list $pm $sm $sm]} 1000
2520			}
2521			eval sijointest {[list $pm btree hash]} 1000
2522			eval sijointest {[list $pm hash btree]} 1000
2523		}
2524	}
2525}
2526
2527proc run { proc_suffix method {start 1} {stop 999} } {
2528	global test_names
2529
2530	switch -exact -- $proc_suffix {
2531		envmethod -
2532		method -
2533		recd -
2534		repmethod -
2535		reptest -
2536		secenv -
2537		secmethod {
2538			# Run_recd runs the recd tests, all others
2539			# run the "testxxx" tests.
2540			if { $proc_suffix == "recd" } {
2541				set testtype recd
2542			} else {
2543				set testtype test
2544			}
2545
2546			for { set i $start } { $i <= $stop } { incr i } {
2547				set name [format "%s%03d" $testtype $i]
2548				# If a test number is missing, silently skip
2549				# to next test; sparse numbering is allowed.
2550				if { [lsearch -exact $test_names($testtype) \
2551				    $name] == -1 } {
2552					continue
2553				}
2554				run_$proc_suffix $method $name
2555			}
2556		}
2557		default {
2558			puts "$proc_suffix is not set up with to be used with run"
2559		}
2560	}
2561}
2562
2563
2564# We want to test all of 512b, 8Kb, and 64Kb pages, but chances are one
2565# of these is the default pagesize.  We don't want to run all the AM tests
2566# twice, so figure out what the default page size is, then return the
2567# other two.
2568proc get_test_pagesizes { } {
2569	# Create an in-memory database.
2570	set db [berkdb_open -create -btree]
2571	error_check_good gtp_create [is_valid_db $db] TRUE
2572	set statret [$db stat]
2573	set pgsz 0
2574	foreach pair $statret {
2575		set fld [lindex $pair 0]
2576		if { [string compare $fld {Page size}] == 0 } {
2577			set pgsz [lindex $pair 1]
2578		}
2579	}
2580
2581	error_check_good gtp_close [$db close] 0
2582
2583	error_check_bad gtp_pgsz $pgsz 0
2584	switch $pgsz {
2585		512 { return {8192 65536} }
2586		8192 { return {512 65536} }
2587		65536 { return {512 8192} }
2588		default { return {512 8192 65536} }
2589	}
2590	error_check_good NOTREACHED 0 1
2591}
2592
2593proc run_timed_once { timedtest args } {
2594	set start [timestamp -r]
2595	set ret [catch {
2596		eval $timedtest $args
2597		flush stdout
2598		flush stderr
2599	} res]
2600	set stop [timestamp -r]
2601	if { $ret != 0 } {
2602		global errorInfo
2603
2604		set fnl [string first "\n" $errorInfo]
2605		set theError [string range $errorInfo 0 [expr $fnl - 1]]
2606		if {[string first FAIL $errorInfo] == -1} {
2607			error "FAIL:[timestamp]\
2608			    run_timed: $timedtest: $theError"
2609		} else {
2610			error $theError;
2611		}
2612	}
2613	return [expr $stop - $start]
2614}
2615
2616proc run_timed { niter timedtest args } {
2617	if { $niter < 1 } {
2618		error "run_timed: Invalid number of iterations $niter"
2619	}
2620	set sum 0
2621	set e {}
2622	for { set i 1 } { $i <= $niter } { incr i } {
2623		set elapsed [eval run_timed_once $timedtest $args]
2624		lappend e $elapsed
2625		set sum [expr $sum + $elapsed]
2626		puts "Test $timedtest run $i completed: $elapsed seconds"
2627	}
2628	if { $niter > 1 } {
2629		set avg [expr $sum / $niter]
2630		puts "Average $timedtest time: $avg"
2631		puts "Raw $timedtest data: $e"
2632	}
2633}
2634