# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999,2008 Oracle.  All rights reserved.
#
# $Id: sdb011.tcl,v 12.6 2008/01/08 20:58:53 bostic Exp $
#
# TEST	sdb011
# TEST	Test deleting Subdbs with overflow pages
# TEST	Create 1 db with many large subdbs.
# TEST	Test subdatabases with overflow pages.

# sdb011 --
#	Build one database file containing $nsubdbs subdatabases, each
#	loaded with $ndups large (file-contents) duplicate records so that
#	overflow pages are created, verify via db stat that overflow (or,
#	for hash, "big") pages exist, then remove the subdatabases one by
#	one in random order.
#
# Arguments:
#	method	-- access method under test (queue/fixed-length are skipped,
#		   since they do not support subdatabases this way)
#	ndups	-- duplicates per key (default 13; reduced to 7 in a txn env
#		   to keep log traffic down -- TODO confirm rationale)
#	nsubdbs	-- number of subdatabases to create (default 10)
#	args	-- extra open flags; may include "-env <env>"
proc sdb011 { method {ndups 13} {nsubdbs 10} args} {
	global names
	source ./include.tcl
	global rand_init
	# Seed the RNG deterministically so the random subdb-deletion order
	# in phase .c is reproducible across runs.
	error_check_good set_random_seed [berkdb srand $rand_init] 0

	set args [convert_args $method $args]
	set omethod [convert_method $method]

	if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
		puts "Subdb011: skipping for method $method"
		return
	}
	set txnenv 0
	set envargs ""
	set max_files 0
	set eindex [lsearch -exact $args "-env"]
	#
	# If we are using an env, then testfile should just be the db name.
	# Otherwise it is the test directory and the name.
	if { $eindex == -1 } {
		set testfile $testdir/subdb011.db
		set env NULL
		set tfpath $testfile
	} else {
		set testfile subdb011.db
		incr eindex
		set env [lindex $args $eindex]
		set envargs " -env $env "
		set txnenv [is_txnenv $env]
		if { $txnenv == 1 } {
			append args " -auto_commit "
			append envargs " -auto_commit "
			set max_files 50
			# Shrink the workload only if the caller kept the
			# default dup count; an explicit ndups is honored.
			if { $ndups == 13 } {
				set ndups 7
			}
		}
		# In an env, paths are relative to the env home, so point
		# testdir (and the full on-disk path) there.
		set testdir [get_home $env]
		set tfpath $testdir/$testfile
	}

	# Create the database and open the dictionary

	cleanup $testdir $env
	set txn ""

	# Here is the loop where we put and get each key/data pair
	set file_list [get_file_list]
	set flen [llength $file_list]
	puts "Subdb011: $method ($args) $ndups overflow dups with \
	    $flen filename=key filecontents=data pairs"

	puts "\tSubdb011.a: Create each of $nsubdbs subdbs and dups"
	set slist {}
	set i 0
	set count 0
	foreach f $file_list {
		# Round-robin the files across the subdatabases.
		set i [expr $i % $nsubdbs]
		if { [is_record_based $method] == 1 } {
			# Record-based methods need integer keys; remember
			# the filename in the global names() array.
			set key [expr $count + 1]
			set names([expr $count + 1]) $f
		} else {
			set key $f
		}
		# Should really catch errors
		set fid [open $f r]
		fconfigure $fid -translation binary
		set filecont [read $fid]
		set subdb subdb$i
		lappend slist $subdb
		close $fid
		set db [eval {berkdb_open -create -mode 0644} \
		    $args {$omethod $testfile $subdb}]
		error_check_good dbopen [is_valid_db $db] TRUE
		for {set dup 0} {$dup < $ndups} {incr dup} {
			# Prefix the dup index so each duplicate is distinct.
			set data $dup:$filecont
			if { $txnenv == 1 } {
				set t [$env txn]
				error_check_good txn [is_valid_txn $t $env] TRUE
				set txn "-txn $t"
			}
			set ret [eval {$db put} $txn {$key \
			    [chop_data $method $data]}]
			error_check_good put $ret 0
			if { $txnenv == 1 } {
				error_check_good txn [$t commit] 0
			}
		}
		error_check_good dbclose [$db close] 0
		incr i
		incr count
	}

	puts "\tSubdb011.b: Verify overflow pages"
	foreach subdb $slist {
		set db [eval {berkdb_open -create -mode 0644} \
		    $args {$omethod $testfile $subdb}]
		error_check_good dbopen [is_valid_db $db] TRUE
		set stat [$db stat]

		# What everyone else calls overflow pages, hash calls "big
		# pages", so we need to special-case hash here.  (Hash
		# overflow pages are additional pages after the first in a
		# bucket.)
		if { [string compare [$db get_type] hash] == 0 } {
			error_check_bad overflow \
			    [is_substr $stat "{{Number of big pages} 0}"] 1
		} else {
			error_check_bad overflow \
			    [is_substr $stat "{{Overflow pages} 0}"] 1
		}
		error_check_good dbclose [$db close] 0
	}

	puts "\tSubdb011.c: Delete subdatabases"
	for {set i $nsubdbs} {$i > 0} {set i [expr $i - 1]} {
		#
		# Randomly delete a subdatabase
		set sindex [berkdb random_int 0 [expr $i - 1]]
		set subdb [lindex $slist $sindex]
		#
		# Delete the one we did from the list
		set slist [lreplace $slist $sindex $sindex]
		error_check_good file_exists_before [file exists $tfpath] 1
		error_check_good db_remove [eval {berkdb dbremove} $envargs \
		    {$testfile $subdb}] 0
	}
}