# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999,2008 Oracle.  All rights reserved.
#
# $Id: recd007.tcl,v 12.11 2008/01/08 20:58:53 bostic Exp $
#
# TEST	recd007
# TEST	File create/delete tests.
# TEST
# TEST	This is a recovery test for create/delete of databases.  We have
# TEST	hooks in the database so that we can abort the process at various
# TEST	points and make sure that the transaction doesn't commit.  We
# TEST	then need to recover and make sure the file is correctly existing
# TEST	or not, as the case may be.
proc recd007 { method {select 0} args } {
	global fixed_len
	source ./include.tcl

	set envargs ""
	set zero_idx [lsearch -exact $args "-zero_log"]
	if { $zero_idx != -1 } {
		set args [lreplace $args $zero_idx $zero_idx]
		set envargs "-zero_log"
	}

	set orig_fixed_len $fixed_len
	set opts [convert_args $method $args]
	set omethod [convert_method $method]

	puts "Recd007: $method operation/transaction tests ($envargs)"

	# Create the database and environment.
	env_cleanup $testdir

	set testfile recd007.db
	set flags "-create -txn -home $testdir $envargs"

	puts "\tRecd007.a: creating environment"
	set env_cmd "berkdb_env $flags"

	set env [eval $env_cmd]

	# We need to create a database to get the pagesize (either
	# the default or whatever might have been specified).
	# Then remove it so we can compute fixed_len and create the
	# real database.
	set oflags "-create $omethod -mode 0644 -env $env $opts $testfile"
	set db [eval {berkdb_open} $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set stat [$db stat]
	#
	# Compute the fixed_len based on the pagesize being used.
	# We want the fixed_len to be 1/4 the pagesize.
	#
	set pg [get_pagesize $stat]
	error_check_bad get_pagesize $pg -1
	set fixed_len [expr $pg / 4]
	error_check_good db_close [$db close] 0
	error_check_good dbremove [berkdb dbremove -env $env $testfile] 0
	error_check_good log_flush [$env log_flush] 0
	error_check_good envclose [$env close] 0

	# Convert the args again because fixed_len is now real.
	set opts [convert_args $method ""]
	set save_opts $opts
	set moreopts {" -lorder 1234 " " -lorder 1234 -chksum " \
	    " -lorder 4321 " " -lorder 4321 -chksum "}

	# List of recovery tests: {HOOKS MSG} pairs
	# Where each HOOK is a list of {COPY ABORT}
	#
	set rlist {
	{ {"none" "preopen"}		"Recd007.b0: none/preopen"}
	{ {"none" "postopen"}		"Recd007.b1: none/postopen"}
	{ {"none" "postlogmeta"}	"Recd007.b2: none/postlogmeta"}
	{ {"none" "postlog"}		"Recd007.b3: none/postlog"}
	{ {"none" "postsync"}		"Recd007.b4: none/postsync"}
	{ {"postopen" "none"}		"Recd007.c0: postopen/none"}
	{ {"postlogmeta" "none"}	"Recd007.c1: postlogmeta/none"}
	{ {"postlog" "none"}		"Recd007.c2: postlog/none"}
	{ {"postsync" "none"}		"Recd007.c3: postsync/none"}
	{ {"postopen" "postopen"}	"Recd007.d: postopen/postopen"}
	{ {"postopen" "postlogmeta"}	"Recd007.e: postopen/postlogmeta"}
	{ {"postopen" "postlog"}	"Recd007.f: postopen/postlog"}
	{ {"postlog" "postlog"}		"Recd007.g: postlog/postlog"}
	{ {"postlogmeta" "postlogmeta"}	"Recd007.h: postlogmeta/postlogmeta"}
	{ {"postlogmeta" "postlog"}	"Recd007.i: postlogmeta/postlog"}
	{ {"postlog" "postsync"}	"Recd007.j: postlog/postsync"}
	{ {"postsync" "postsync"}	"Recd007.k: postsync/postsync"}
	}

	# These are all the data values that we're going to need to read
	# through the operation table and run the recovery tests.

	foreach pair $rlist {
		set cmd [lindex $pair 0]
		set msg [lindex $pair 1]
		#
		# Run natively
		#
		file_recover_create $testdir $env_cmd $omethod \
		    $save_opts $testfile $cmd $msg
		foreach o $moreopts {
			set opts $save_opts
			append opts $o
			file_recover_create $testdir $env_cmd $omethod \
			    $opts $testfile $cmd $msg
		}
	}

	set rlist {
	{ {"none" "predestroy"}		"Recd007.l0: none/predestroy"}
	{ {"none" "postdestroy"}	"Recd007.l1: none/postdestroy"}
	{ {"predestroy" "none"}		"Recd007.m0: predestroy/none"}
	{ {"postdestroy" "none"}	"Recd007.m1: postdestroy/none"}
	{ {"predestroy" "predestroy"}	"Recd007.n: predestroy/predestroy"}
	{ {"predestroy" "postdestroy"}	"Recd007.o: predestroy/postdestroy"}
	{ {"postdestroy" "postdestroy"}	"Recd007.p: postdestroy/postdestroy"}
	}

	foreach op { dbremove dbrename dbtruncate } {
		foreach pair $rlist {
			set cmd [lindex $pair 0]
			set msg [lindex $pair 1]
			file_recover_delete $testdir $env_cmd $omethod \
			    $save_opts $testfile $cmd $msg $op
			foreach o $moreopts {
				set opts $save_opts
				append opts $o
				file_recover_delete $testdir $env_cmd \
				    $omethod $opts $testfile $cmd $msg $op
			}
		}
	}

	if { $is_windows_test != 1 } {
		set env_cmd "berkdb_env_noerr $flags"
		do_file_recover_delmk $testdir $env_cmd $method $opts $testfile
	}

	puts "\tRecd007.r: Verify db_printlog can read logfile"
	set tmpfile $testdir/printlog.out
	set stat [catch {exec $util_path/db_printlog -h $testdir \
	    > $tmpfile} ret]
	error_check_good db_printlog $stat 0
	fileremove $tmpfile
	set fixed_len $orig_fixed_len
	return
}

proc file_recover_create { dir env_cmd method opts dbfile cmd msg } {
	#
	# We run this test on each of these scenarios:
	# 1. Creating just a database
	# 2. Creating a database with a subdb
	# 3. Creating a 2nd subdb in a database
	puts "\t$msg ($opts) create with a database"
	do_file_recover_create $dir $env_cmd $method $opts $dbfile \
	    0 $cmd $msg
	if { [is_queue $method] == 1 } {
		puts "\tSkipping subdatabase tests for method $method"
		return
	}
	puts "\t$msg ($opts) create with a database and subdb"
	do_file_recover_create $dir $env_cmd $method $opts $dbfile \
	    1 $cmd $msg
	puts "\t$msg ($opts) create with a database and 2nd subdb"
	do_file_recover_create $dir $env_cmd $method $opts $dbfile \
	    2 $cmd $msg

}

proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
	global log_log_record_types
	source ./include.tcl

	# Keep track of the log types we've seen
	if { $log_log_record_types == 1} {
		logtrack_read $dir
	}

	env_cleanup $dir
	set dflags "-dar"
	# Open the environment and set the copy/abort locations
	set env [eval $env_cmd]
	set copy [lindex $cmd 0]
	set abort [lindex $cmd 1]
	error_check_good copy_location [is_valid_create_loc $copy] 1
	error_check_good abort_location [is_valid_create_loc $abort] 1

	if {([string first "logmeta" $copy] != -1 || \
	    [string first "logmeta" $abort] != -1) && \
	    [is_btree $method] == 0 } {
		puts "\tSkipping for method $method"
		$env test copy none
		$env test abort none
		error_check_good log_flush [$env log_flush] 0
		error_check_good env_close [$env close] 0
		return
	}

	# Basically non-existence is our initial state.  When we
	# abort, it is also our final state.
	#
	switch $sub {
	0 {
		set oflags "-create $method -auto_commit -mode 0644 \
		    -env $env $opts $dbfile"
	}
	1 {
		set oflags "-create $method -auto_commit -mode 0644 \
		    -env $env $opts $dbfile sub0"
	}
	2 {
		#
		# If we are aborting here, then we need to
		# create a first subdb, then create a second
		#
		set oflags "-create $method -auto_commit -mode 0644 \
		    -env $env $opts $dbfile sub0"
		set db [eval {berkdb_open} $oflags]
		error_check_good db_open [is_valid_db $db] TRUE
		error_check_good db_close [$db close] 0
		set init_file $dir/$dbfile.init
		catch { file copy -force $dir/$dbfile $init_file } res
		set oflags "-create $method -auto_commit -mode 0644 \
		    -env $env $opts $dbfile sub1"
	}
	default {
		puts "\tBad value $sub for sub"
		return
	}
	}
	#
	# Set our locations to copy and abort
	#
	set ret [eval $env test copy $copy]
	error_check_good test_copy $ret 0
	set ret [eval $env test abort $abort]
	error_check_good test_abort $ret 0

	puts "\t\tExecuting command"
	set ret [catch {eval {berkdb_open} $oflags} db]

	# Sync the mpool so any changes to the file that are
	# in mpool get written to the disk file before the
	# diff.
	$env mpool_sync

	#
	# If we don't abort, then we expect success.
	# If we abort, we expect no file created.
	#
	if {[string first "none" $abort] == -1} {
		#
		# Operation was aborted, verify it does
		# not exist.
		#
		puts "\t\tCommand executed and aborted."
		# Bug fix: check the catch status value ($ret), not the
		# literal string "ret".  The unsubstituted form compared
		# "ret" to 0 and could never fail, so an open that
		# unexpectedly succeeded went undetected.
		error_check_bad db_open $ret 0

		#
		# Check that the file does not exist.  Final state.
		#
		if { $sub != 2 } {
			error_check_good db_open:exists \
			    [file exists $dir/$dbfile] 0
		} else {
			error_check_good \
			    diff(init,postcreate):diff($init_file,$dir/$dbfile)\
			    [dbdump_diff $dflags $init_file $dir $dbfile] 0
		}
	} else {
		#
		# Operation was committed, verify it exists.
		#
		puts "\t\tCommand executed and committed."
		error_check_good db_open [is_valid_db $db] TRUE
		error_check_good db_close [$db close] 0

		#
		# Check that the file exists.
		#
		error_check_good db_open [file exists $dir/$dbfile] 1
		set init_file $dir/$dbfile.init
		catch { file copy -force $dir/$dbfile $init_file } res

		if { [is_queue $method] == 1 } {
			copy_extent_file $dir $dbfile init
		}
	}
	error_check_good log_flush [$env log_flush] 0
	error_check_good env_close [$env close] 0

	#
	# Run recovery here.  Should be a no-op.  Verify that
	# the file still doesn't exist or change (depending on sub)
	# when we are done.
	#
	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"
	if { $sub != 2 && [string first "none" $abort] == -1} {
		#
		# Operation was aborted, verify it still does
		# not exist.  Only done with file creations.
		#
		error_check_good after_recover1 [file exists $dir/$dbfile] 0
	} else {
		#
		# Operation was committed or just a subdb was aborted.
		# Verify it did not change.
		#
		error_check_good \
		    diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
		#
		# Need a new copy to get the right LSN into the file.
		#
		catch { file copy -force $dir/$dbfile $init_file } res

		if { [is_queue $method] == 1 } {
			copy_extent_file $dir $dbfile init
		}
	}

	# If we didn't make a copy, then we are done.
	#
	if {[string first "none" $copy] != -1} {
		return
	}

	#
	# Now move the .afterop file to $dbfile.  Run recovery again.
	#
	copy_afterop $dir

	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"
	if { $sub != 2 && [string first "none" $abort] == -1} {
		#
		# Operation was aborted, verify it still does
		# not exist.  Only done with file creations.
		#
		error_check_good after_recover2 [file exists $dir/$dbfile] 0
	} else {
		#
		# Operation was committed or just a subdb was aborted.
		# Verify it did not change.
		#
		error_check_good \
		    diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
	}

}

proc file_recover_delete { dir env_cmd method opts dbfile cmd msg op } {
	#
	# We run this test on each of these scenarios:
	# 1. Deleting/Renaming just a database
	# 2. Deleting/Renaming a database with a subdb
	# 3. Deleting/Renaming a 2nd subdb in a database
	puts "\t$msg $op ($opts) with a database"
	do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
	    0 $cmd $msg $op
	if { [is_queue $method] == 1 } {
		puts "\tSkipping subdatabase tests for method $method"
		return
	}
	puts "\t$msg $op ($opts) with a database and subdb"
	do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
	    1 $cmd $msg $op
	puts "\t$msg $op ($opts) with a database and 2nd subdb"
	do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
	    2 $cmd $msg $op

}

proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
	global log_log_record_types
	source ./include.tcl

	# Keep track of the log types we've seen
	if { $log_log_record_types == 1} {
		logtrack_read $dir
	}

	env_cleanup $dir
	# Open the environment and set the copy/abort locations
	set env [eval $env_cmd]
	set copy [lindex $cmd 0]
	set abort [lindex $cmd 1]
	error_check_good copy_location [is_valid_delete_loc $copy] 1
	error_check_good abort_location [is_valid_delete_loc $abort] 1

	if { [is_record_based $method] == 1 } {
		set key1 1
		set key2 2
	} else {
		set key1 recd007_key1
		set key2 recd007_key2
	}
	set data1 recd007_data0
	set data2 recd007_data1
	set data3 NEWrecd007_data2

	#
	# Depending on what sort of subdb we want, if any, our
	# args to the open call will be different (and if we
	# want a 2nd subdb, we create the first here.
	#
	# XXX
	# For dbtruncate, we want oflags to have "$env" in it,
	# not have the value currently in 'env'.  That is why
	# the '$' is protected below.  Later on we use oflags
	# but with a new $env we just opened.
	#
	switch $sub {
	0 {
		set subdb ""
		set new $dbfile.new
		set dflags "-dar"
		set oflags "-create $method -auto_commit -mode 0644 \
		    -env \$env $opts $dbfile"
	}
	1 {
		set subdb sub0
		set new $subdb.new
		set dflags ""
		set oflags "-create $method -auto_commit -mode 0644 \
		    -env \$env $opts $dbfile $subdb"
	}
	2 {
		#
		# If we are aborting here, then we need to
		# create a first subdb, then create a second
		#
		set subdb sub1
		set new $subdb.new
		set dflags ""
		set oflags "-create $method -auto_commit -mode 0644 \
		    -env \$env $opts $dbfile sub0"
		set db [eval {berkdb_open} $oflags]
		error_check_good db_open [is_valid_db $db] TRUE
		set txn [$env txn]
		set ret [$db put -txn $txn $key1 $data1]
		error_check_good db_put $ret 0
		error_check_good commit [$txn commit] 0
		error_check_good db_close [$db close] 0
		set oflags "-create $method -auto_commit -mode 0644 \
		    -env \$env $opts $dbfile $subdb"
	}
	default {
		puts "\tBad value $sub for sub"
		return
	}
	}

	#
	# Set our locations to copy and abort
	#
	set ret [eval $env test copy $copy]
	error_check_good test_copy $ret 0
	set ret [eval $env test abort $abort]
	error_check_good test_abort $ret 0

	#
	# Open our db, add some data, close and copy as our
	# init file.
	#
	set db [eval {berkdb_open} $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set txn [$env txn]
	set ret [$db put -txn $txn $key1 $data1]
	error_check_good db_put $ret 0
	set ret [$db put -txn $txn $key2 $data2]
	error_check_good db_put $ret 0
	error_check_good commit [$txn commit] 0
	error_check_good db_close [$db close] 0

	$env mpool_sync

	set init_file $dir/$dbfile.init
	catch { file copy -force $dir/$dbfile $init_file } res

	if { [is_queue $method] == 1} {
		copy_extent_file $dir $dbfile init
	}

	#
	# If we don't abort, then we expect success.
	# If we abort, we expect no file removed.
	#
	switch $op {
	"dbrename" {
		set ret [catch { eval {berkdb} $op -env $env -auto_commit \
		    $dbfile $subdb $new } remret]
	}
	"dbremove" {
		set ret [catch { eval {berkdb} $op -env $env -auto_commit \
		    $dbfile $subdb } remret]
	}
	"dbtruncate" {
		set txn [$env txn]
		set db [eval {berkdb_open_noerr -env} \
		    $env -auto_commit $dbfile $subdb]
		error_check_good dbopen [is_valid_db $db] TRUE
		error_check_good txnbegin [is_valid_txn $txn $env] TRUE
		set ret [catch {$db truncate -txn $txn} remret]
	}
	}
	$env mpool_sync
	if { $abort == "none" } {
		if { $op == "dbtruncate" } {
			error_check_good txncommit [$txn commit] 0
			error_check_good dbclose [$db close] 0
		}
		#
		# Operation was committed, verify it.
		#
		puts "\t\tCommand executed and committed."
		error_check_good $op $ret 0
		#
		# If a dbtruncate, check that truncate returned the number
		# of items previously in the database.
		#
		if { [string compare $op "dbtruncate"] == 0 } {
			error_check_good remret $remret 2
		}
		recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
	} else {
		#
		# Operation was aborted, verify it did not change.
		#
		if { $op == "dbtruncate" } {
			error_check_good txnabort [$txn abort] 0
			error_check_good dbclose [$db close] 0
		}
		puts "\t\tCommand executed and aborted."
		error_check_good $op $ret 1

		#
		# Check that the file exists.  Final state.
		# Compare against initial file.
		#
		error_check_good post$op.1 [file exists $dir/$dbfile] 1
		error_check_good \
		    diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
	}
	$env mpool_sync
	error_check_good log_flush [$env log_flush] 0
	error_check_good env_close [$env close] 0
	catch { file copy -force $dir/$dbfile $init_file } res
	if { [is_queue $method] == 1} {
		copy_extent_file $dir $dbfile init
	}

	#
	# Run recovery here.  Should be a no-op.  Verify that
	# the file still doesn't exist or change (depending on abort)
	# when we are done.
	#
	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}

	puts "complete"

	if { $abort == "none" } {
		#
		# Operation was committed.
		#
		set env [eval $env_cmd]
		recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
		error_check_good log_flush [$env log_flush] 0
		error_check_good env_close [$env close] 0
	} else {
		#
		# Operation was aborted, verify it did not change.
		#
		berkdb debug_check
		error_check_good \
		    diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
	}

	#
	# If we didn't make a copy, then we are done.
	#
	if {[string first "none" $copy] != -1} {
		return
	}

	#
	# Now restore the .afterop file(s) to their original name.
	# Run recovery again.
	#
	copy_afterop $dir

	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"

	if { [string first "none" $abort] != -1} {
		set env [eval $env_cmd]
		recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
		error_check_good log_flush [$env log_flush] 0
		error_check_good env_close [$env close] 0
	} else {
		#
		# Operation was aborted, verify it did not change.
		#
		error_check_good \
		    diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
		    [dbdump_diff $dflags $init_file $dir $dbfile] 0
	}

}

#
# This function tests a specific case of recovering after a db removal.
# This is for SR #2538.  Basically we want to test that:
# - Make an env.
# - Make/close a db.
# - Remove the db.
# - Create another db of same name.
# - Sync db but leave open.
# - Run recovery.
# - Verify no recovery errors and that new db is there.
proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
	global log_log_record_types
	source ./include.tcl

	# Recovery after removing and recreating a database of the same
	# name (SR #2538, SR #13026): remove, recreate, sync-but-leave-open,
	# run catastrophic recovery, and verify the expected version of
	# the database survives each time.

	# Keep track of the log types we've seen
	if { $log_log_record_types == 1} {
		logtrack_read $dir
	}
	set omethod [convert_method $method]

	puts "\tRecd007.q: Delete and recreate a database"
	env_cleanup $dir
	# Open the environment and set the copy/abort locations
	set env [eval $env_cmd]
	error_check_good env_open [is_valid_env $env] TRUE

	if { [is_record_based $method] == 1 } {
		set key 1
	} else {
		set key recd007_key
	}
	set data1 recd007_data
	set data2 NEWrecd007_data2
	set data3 LASTrecd007_data3

	set oflags \
	    "-create $omethod -auto_commit -mode 0644 $opts $dbfile"

	#
	# Open our db, add some data, close and copy as our
	# init file.
	#
	set db [eval {berkdb_open_noerr} -env $env $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set txn [$env txn]
	set ret [$db put -txn $txn $key $data1]
	error_check_good db_put $ret 0
	error_check_good commit [$txn commit] 0
	error_check_good db_close [$db close] 0
	file copy -force $testdir/$dbfile $testdir/${dbfile}.1

	set ret \
	    [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]

	#
	# Operation was committed, verify it does
	# not exist.
	#
	puts "\t\tCommand executed and committed."
	error_check_good dbremove $ret 0
	error_check_good dbremove.1 [file exists $dir/$dbfile] 0

	#
	# Now create a new db with the same name.
	#
	set db [eval {berkdb_open_noerr} -env $env $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set txn [$env txn]
	set ret [$db put -txn $txn $key [chop_data $method $data2]]
	error_check_good db_put $ret 0
	error_check_good commit [$txn commit] 0
	error_check_good db_sync [$db sync] 0

	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"
	error_check_good db_recover $stat 0
	error_check_good file_exist [file exists $dir/$dbfile] 1
	#
	# Since we ran recovery on the open db/env, we need to
	# catch these calls.  Basically they are there to clean
	# up the Tcl widgets.
	#
	set stat [catch {$db close} ret]
	error_check_bad dbclose_after_remove $stat 0
	error_check_good dbclose_after_remove [is_substr $ret recovery] 1
	set stat [catch {$env log_flush} ret]
	set stat [catch {$env close} ret]
	error_check_bad envclose_after_remove $stat 0
	error_check_good envclose_after_remove [is_substr $ret recovery] 1

	#
	# Reopen env and db and verify 2nd database is there.
	#
	set env [eval $env_cmd]
	error_check_good env_open [is_valid_env $env] TRUE
	set db [eval {berkdb_open} -env $env $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set ret [$db get $key]
	error_check_good dbget [llength $ret] 1
	set kd [lindex $ret 0]
	error_check_good key [lindex $kd 0] $key
	error_check_good data2 [lindex $kd 1] [pad_data $method $data2]

	error_check_good dbclose [$db close] 0
	error_check_good log_flush [$env log_flush] 0
	error_check_good envclose [$env close] 0

	#
	# Copy back the original database and run recovery again.
	# SR [#13026]
	#
	puts "\t\tRecover from first database"
	file copy -force $testdir/${dbfile}.1 $testdir/$dbfile
	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"
	error_check_good db_recover $stat 0
	error_check_good db_recover.1 [file exists $dir/$dbfile] 1

	#
	# Reopen env and db and verify 2nd database is there.
	#
	set env [eval $env_cmd]
	error_check_good env_open [is_valid_env $env] TRUE
	set db [eval {berkdb_open_noerr} -env $env $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set ret [$db get $key]
	error_check_good dbget [llength $ret] 1
	set kd [lindex $ret 0]
	error_check_good key [lindex $kd 0] $key
	error_check_good data2 [lindex $kd 1] [pad_data $method $data2]

	error_check_good dbclose [$db close] 0

	file copy -force $testdir/$dbfile $testdir/${dbfile}.2

	puts "\t\tRemove second db"
	set ret \
	    [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]

	#
	# Operation was committed, verify it does
	# not exist.
	#
	puts "\t\tCommand executed and committed."
	error_check_good dbremove $ret 0
	error_check_good dbremove.2 [file exists $dir/$dbfile] 0

	#
	# Now create a new db with the same name.
	#
	puts "\t\tAdd a third version of the database"
	set db [eval {berkdb_open_noerr} -env $env $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set txn [$env txn]
	set ret [$db put -txn $txn $key [chop_data $method $data3]]
	error_check_good db_put $ret 0
	error_check_good commit [$txn commit] 0
	error_check_good db_sync [$db sync] 0

	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"
	error_check_good db_recover $stat 0
	error_check_good file_exist [file exists $dir/$dbfile] 1

	#
	# Since we ran recovery on the open db/env, we need to
	# catch these calls to clean up the Tcl widgets.
	#
	set stat [catch {$db close} ret]
	error_check_bad dbclose_after_remove $stat 0
	error_check_good dbclose_after_remove [is_substr $ret recovery] 1
	set stat [catch {$env log_flush} ret]
	set stat [catch {$env close} ret]
	error_check_bad envclose_after_remove $stat 0
	error_check_good envclose_after_remove [is_substr $ret recovery] 1

	#
	# Copy back the second database and run recovery again.
	#
	puts "\t\tRecover from second database"
	file copy -force $testdir/${dbfile}.2 $testdir/$dbfile
	berkdb debug_check
	puts -nonewline "\t\tAbout to run recovery ... "
	flush stdout

	set stat [catch {exec $util_path/db_recover -h $dir -c} result]
	if { $stat == 1 } {
		error "FAIL: Recovery error: $result."
		return
	}
	puts "complete"
	error_check_good db_recover $stat 0
	error_check_good file_exist.2 [file exists $dir/$dbfile] 1

	#
	# Reopen env and db and verify 3rd database is there.
	#
	set env [eval $env_cmd]
	error_check_good env_open [is_valid_env $env] TRUE
	set db [eval {berkdb_open} -env $env $oflags]
	error_check_good db_open [is_valid_db $db] TRUE
	set ret [$db get $key]
	error_check_good dbget [llength $ret] 1
	set kd [lindex $ret 0]
	error_check_good key [lindex $kd 0] $key
	# Bug fix: this check verifies the third version's data ($data3),
	# so label it data3, not data2 -- a failure here previously
	# reported a misleading check name.
	error_check_good data3 [lindex $kd 1] [pad_data $method $data3]

	error_check_good dbclose [$db close] 0
	error_check_good log_flush [$env log_flush] 0
	error_check_good envclose [$env close] 0
}

# Return 1 if loc names a valid create-test hook location, else 0.
proc is_valid_create_loc { loc } {
	switch $loc {
	none		-
	preopen		-
	postopen	-
	postlogmeta	-
	postlog		-
	postsync
		{ return 1 }
	default
		{ return 0 }
	}
}

# Return 1 if loc names a valid delete-test hook location, else 0.
proc is_valid_delete_loc { loc } {
	switch $loc {
	none		-
	predestroy	-
	postdestroy	-
	postremcall
		{ return 1 }
	default
		{ return 0 }
	}
}

# Do a logical diff on the db dump files.
# We expect that either the files are identical, or if they differ,
# that it is exactly just a free/invalid page.
# Return 1 if they are different, 0 if logically the same (or identical).
#
proc dbdump_diff { flags initfile dir dbfile } {
	source ./include.tcl

	set initdump $initfile.dump
	set dbdump $dbfile.dump

	set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
	    $initfile} ret]
	error_check_good dbdump.init $stat 0

	# Do a dump without the freelist which should eliminate any
	# recovery differences.
	set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
	    $dir/$dbfile} ret]
	error_check_good dbdump.db $stat 0

	set stat [filecmp $dir/$dbdump $initdump]

	if {$stat == 0} {
		return 0
	}
	puts "diff: $dbdump $initdump gives:\n$ret"
	return 1
}

# Verify the post-operation state of the database/subdb after a
# committed dbremove/dbrename/dbtruncate, both directly and after
# recovery.
proc recd007_check { op sub dir dbfile subdb new env oflags } {
	#
	# No matter how many subdbs we have, dbtruncate will always
	# have a file, and if we open our particular db, it should
	# have no entries.
	#
	if { $sub == 0 } {
		if { $op == "dbremove" } {
			error_check_good $op:not-exist:$dir/$dbfile \
			    [file exists $dir/$dbfile] 0
		} elseif { $op == "dbrename"} {
			error_check_good $op:exist \
			    [file exists $dir/$dbfile] 0
			error_check_good $op:exist2 \
			    [file exists $dir/$dbfile.new] 1
		} else {
			error_check_good $op:exist \
			    [file exists $dir/$dbfile] 1
			set db [eval {berkdb_open} $oflags]
			error_check_good db_open [is_valid_db $db] TRUE
			set dbc [$db cursor]
			error_check_good dbc_open \
			    [is_valid_cursor $dbc $db] TRUE
			set ret [$dbc get -first]
			error_check_good dbget1 [llength $ret] 0
			error_check_good dbc_close [$dbc close] 0
			error_check_good db_close [$db close] 0
		}
		return
	} else {
		set t1 $dir/t1
		#
		# If we have subdbs, check that all but the last one
		# are there, and the last one is correctly operated on.
		#
		set db [berkdb_open -rdonly -env $env $dbfile]
		error_check_good dbopen [is_valid_db $db] TRUE
		set c [eval {$db cursor}]
		error_check_good db_cursor [is_valid_cursor $c $db] TRUE
		set d [$c get -last]
		if { $op == "dbremove" } {
			if { $sub == 1 } {
				error_check_good subdb:rem [llength $d] 0
			} else {
				error_check_bad subdb:rem [llength $d] 0
				set sdb [lindex [lindex $d 0] 0]
				error_check_bad subdb:rem1 $sdb $subdb
			}
		} elseif { $op == "dbrename"} {
			set sdb [lindex [lindex $d 0] 0]
			error_check_good subdb:ren $sdb $new
			if { $sub != 1 } {
				set d [$c get -prev]
				error_check_bad subdb:ren [llength $d] 0
				set sdb [lindex [lindex $d 0] 0]
				error_check_good subdb:ren1 \
				    [is_substr "new" $sdb] 0
			}
		} else {
			set sdb [lindex [lindex $d 0] 0]
			set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
			error_check_good db_open [is_valid_db $dbt] TRUE
			set dbc [$dbt cursor]
			error_check_good dbc_open \
			    [is_valid_cursor $dbc $dbt] TRUE
			set ret [$dbc get -first]
			error_check_good dbget2 [llength $ret] 0
			error_check_good dbc_close [$dbc close] 0
			error_check_good db_close [$dbt close] 0
			if { $sub != 1 } {
				set d [$c get -prev]
				error_check_bad subdb:ren [llength $d] 0
				set sdb [lindex [lindex $d 0] 0]
				set dbt [berkdb_open -rdonly -env $env \
				    $dbfile $sdb]
				error_check_good db_open [is_valid_db $dbt] TRUE
				# Bug fix: open the cursor on the subdb handle
				# $dbt we just opened (as the parallel code
				# above does), not on the master db $db --
				# otherwise this check never looked inside
				# the subdb at all.
				set dbc [$dbt cursor]
				error_check_good dbc_open \
				    [is_valid_cursor $dbc $dbt] TRUE
				set ret [$dbc get -first]
				error_check_bad dbget3 [llength $ret] 0
				error_check_good dbc_close [$dbc close] 0
				error_check_good db_close [$dbt close] 0
			}
		}
		error_check_good dbcclose [$c close] 0
		error_check_good db_close [$db close] 0
	}
}

# Rename any *.afterop files in dir back to their original names
# (strip the .afterop suffix), restoring hook-copied databases.
proc copy_afterop { dir } {
	set r [catch { set filecopy [glob $dir/*.afterop] } res]
	if { $r == 1 } {
		return
	}
	foreach f $filecopy {
		set orig [string range $f 0 \
		    [expr [string last "." $f] - 1]]
		catch { file rename -force $f $orig} res
	}
}