1use strict;
2use warnings;  
3no warnings 'uninitialized';
4
5use Test::More;
6use Test::Exception;
7use lib qw(t/lib);
8use DBICTest;
9
# Pull the live-test connection info from the conventional DBICTEST_* env
# vars. These lexicals are used throughout the rest of the file.
my ($dsn, $user, $pass) =
  @ENV{qw(DBICTEST_SYBASE_DSN DBICTEST_SYBASE_USER DBICTEST_SYBASE_PASS)};

# Per-storage-type test count; the whole suite runs once per storage type
# (hence $TESTS*2) plus the final ping-count check (+1).
my $TESTS = 66 + 2;

if ($dsn && $user) {
  plan tests => $TESTS*2 + 1;
} else {
  plan skip_all =>
    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
    "\nWarning: This test drops and creates the tables " .
    "'artist', 'money_test' and 'bindtype_test'";
}
22
# The two storage backends under test: the normal placeholder-capable ASE
# storage and the NoBindVars fallback used when placeholders are unusable.
my @storage_types = (
  'DBI::Sybase::ASE',
  'DBI::Sybase::ASE::NoBindVars',
);
# String eval is deliberate here: the class name is interpolated at runtime
# and a load failure is intentionally ignored (it surfaces on connect).
eval "require DBIx::Class::Storage::$_;" for @storage_types;

my $schema;
my $storage_idx = -1;  # incremented at the top of the main loop below
31
# Build a fresh schema connection using the file-scope $dsn/$user/$pass.
# blob_setup with log_on_update enabled is the safer option for the blob
# round-trip tests further down.
sub get_schema {
  my %attrs = (
    on_connect_call => [
      [ blob_setup => log_on_update => 1 ], # this is a safer option
    ],
  );
  return DBICTest::Schema->connect($dsn, $user, $pass, \%attrs);
}
39
# Count every ping the storage performs so the very last test can assert
# that the whole run never triggered one (a ping indicates an unwanted
# connectivity re-check).
my $ping_count = 0;
{
  # Glob-assigning over an existing sub raises a "Subroutine redefined"
  # warning under 'use warnings'; silence just that category here, the same
  # way the txn_commit override further down already does.
  no warnings 'redefine';

  my $ping = DBIx::Class::Storage::DBI::Sybase::ASE->can('_ping');
  *DBIx::Class::Storage::DBI::Sybase::ASE::_ping = sub {
    $ping_count++;
    goto $ping;  # tail-call the original so callers see unchanged behavior
  };
}
48
# Run the entire test battery once per storage type.
for my $storage_type (@storage_types) {
  $storage_idx++;

  # For the plain ASE type we rely on autodetection; only force the
  # storage class for the NoBindVars variant.
  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
    DBICTest::Schema->storage_type("::$storage_type");
  }

  $schema = get_schema();

  $schema->storage->ensure_connected;

  # If autodetection already landed on NoBindVars during the first pass,
  # the placeholder-based tests cannot run at all - burn the whole
  # per-storage test allotment as skips and move on.
  if ($storage_idx == 0 &&
      $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars')) {
# no placeholders in this version of Sybase or DBD::Sybase (or using FreeTDS)
      my $tb = Test::More->builder;
      $tb->skip('no placeholders') for 1..$TESTS;
      next;
  }

  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );

  # Kill the underlying handle behind the storage's back and make sure
  # the storage transparently reconnects.
  $schema->storage->_dbh->disconnect;
  lives_ok (sub { $schema->storage->dbh }, 'reconnect works');
72
  # (Re)create the artist table with an IDENTITY PK; the DROP is wrapped in
  # eval because the table may not exist on a fresh database.
  $schema->storage->dbh_do (sub {
      my ($storage, $dbh) = @_;
      eval { $dbh->do("DROP TABLE artist") };
      $dbh->do(<<'SQL');
CREATE TABLE artist (
   artistid INT IDENTITY PRIMARY KEY,
   name VARCHAR(100),
   rank INT DEFAULT 13 NOT NULL,
   charfield CHAR(10) NULL
)
SQL
  });

  # Tracks every identity value handed out, to prove uniqueness below.
  my %seen_id;

# so we start unconnected
  $schema->storage->disconnect;

# test primary key handling
  my $new = $schema->resultset('Artist')->create({ name => 'foo' });
  ok($new->artistid > 0, "Auto-PK worked");

  $seen_id{$new->artistid}++;
96
# check redispatch to storage-specific insert when auto-detected storage
  # Reset to the generic ::DBI storage and reconnect so the next insert has
  # to re-detect and redispatch into the Sybase-specific code path.
  if ($storage_type eq 'DBI::Sybase::ASE') {
    DBICTest::Schema->storage_type('::DBI');
    $schema = get_schema();
  }

  $new = $schema->resultset('Artist')->create({ name => 'Artist 1' });
  is ( $seen_id{$new->artistid}, undef, 'id for Artist 1 is unique' );
  $seen_id{$new->artistid}++;

# inserts happen in a txn, so we make sure it still works inside a txn too
  $schema->txn_begin;

  for (2..6) {
    $new = $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
    is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" );
    $seen_id{$new->artistid}++;
  }

  $schema->txn_commit;

# test simple count
  # 7 rows so far: 'foo' plus 'Artist 1' .. 'Artist 6'.
  is ($schema->resultset('Artist')->count, 7, 'count(*) of whole table ok');
120
# test LIMIT support
  my $it = $schema->resultset('Artist')->search({
    artistid => { '>' => 0 }
  }, {
    rows => 3,
    order_by => 'artistid',
  });

  is( $it->count, 3, "LIMIT count ok" );

  # rows are ordered by artistid: foo, Artist 1, Artist 2, ...
  is( $it->next->name, "foo", "iterator->next ok" );
  $it->next;
  is( $it->next->name, "Artist 2", "iterator->next ok" );
  is( $it->next, undef, "next past end of resultset ok" );

# now try with offset
  $it = $schema->resultset('Artist')->search({}, {
    rows => 3,
    offset => 3,
    order_by => 'artistid',
  });

  is( $it->count, 3, "LIMIT with offset count ok" );

  # offset 3 skips foo/Artist 1/Artist 2, so the window is Artist 3..5
  is( $it->next->name, "Artist 3", "iterator->next ok" );
  $it->next;
  is( $it->next->name, "Artist 5", "iterator->next ok" );
  is( $it->next, undef, "next past end of resultset ok" );

# now try a grouped count
  # Insert six duplicates of one name; a GROUP BY name count must still
  # see only 7 distinct names.
  $schema->resultset('Artist')->create({ name => 'Artist 6' })
    for (1..6);

  $it = $schema->resultset('Artist')->search({}, {
    group_by => 'name'
  });

  is( $it->count, 7, 'COUNT of GROUP_BY ok' );
159
# do an IDENTITY_INSERT
  {
    # needed for the local glob override of txn_commit below
    no warnings 'redefine';

    # Capture the SQL the storage emits via the debug callback, so we can
    # verify that IDENTITY_INSERT was actually switched on.
    my @debug_out;
    local $schema->storage->{debug} = 1;
    local $schema->storage->debugobj->{callback} = sub {
      push @debug_out, $_[1];
    };

    # Instrument txn_commit to detect whether the insert was wrapped in a
    # transaction; 'local' restores the original sub when the block exits.
    my $txn_used = 0;
    my $txn_commit = \&DBIx::Class::Storage::DBI::txn_commit;
    local *DBIx::Class::Storage::DBI::txn_commit = sub {
      $txn_used = 1;
      goto &$txn_commit;
    };

    # Explicit PK on an IDENTITY column forces the IDENTITY_INSERT path.
    $schema->resultset('Artist')
      ->create({ artistid => 999, name => 'mtfnpy' });

    ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT used');

    SKIP: {
      skip 'not testing lack of txn on IDENTITY_INSERT with NoBindVars', 1
        if $storage_type =~ /NoBindVars/i;

      is $txn_used, 0, 'no txn on insert with IDENTITY_INSERT';
    }
  }

# do an IDENTITY_UPDATE
  {
    my @debug_out;
    local $schema->storage->{debug} = 1;
    local $schema->storage->debugobj->{callback} = sub {
      push @debug_out, $_[1];
    };

    # Changing the PK value itself requires the IDENTITY_UPDATE path.
    lives_and {
      $schema->resultset('Artist')
        ->find(999)->update({ artistid => 555 });
      ok((grep /IDENTITY_UPDATE/i, @debug_out));
    } 'IDENTITY_UPDATE used';
    # a failed block above would have pinged on reconnect; don't let that
    # skew the final no-pings assertion
    $ping_count-- if $@;
  }
205
206  my $bulk_rs = $schema->resultset('Artist')->search({
207    name => { -like => 'bulk artist %' }
208  });
209
210# test insert_bulk using populate.
211  SKIP: {
212    skip 'insert_bulk not supported', 4
213      unless $storage_type !~ /NoBindVars/i;
214
215    lives_ok {
216      $schema->resultset('Artist')->populate([
217        {
218          name => 'bulk artist 1',
219          charfield => 'foo',
220        },
221        {
222          name => 'bulk artist 2',
223          charfield => 'foo',
224        },
225        {
226          name => 'bulk artist 3',
227          charfield => 'foo',
228        },
229      ]);
230    } 'insert_bulk via populate';
231
232    is $bulk_rs->count, 3, 'correct number inserted via insert_bulk';
233
234    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
235      'column set correctly via insert_bulk');
236
237    my %bulk_ids;
238    @bulk_ids{map $_->artistid, $bulk_rs->all} = ();
239
240    is ((scalar keys %bulk_ids), 3,
241      'identities generated correctly in insert_bulk');
242
243    $bulk_rs->delete;
244  }
245
# make sure insert_bulk works a second time on the same connection
  SKIP: {
    skip 'insert_bulk not supported', 3
      if $storage_type =~ /NoBindVars/i;

    # Same three-row bulk insert as before, but with charfield 'bar' so a
    # stale row from the previous run cannot satisfy the checks below.
    lives_ok {
      $schema->resultset('Artist')->populate([
        map +{ name => "bulk artist $_", charfield => 'bar' }, 1 .. 3
      ]);
    } 'insert_bulk via populate called a second time';

    is $bulk_rs->count, 3,
      'correct number inserted via insert_bulk';

    is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3,
      'column set correctly via insert_bulk');

    $bulk_rs->delete;
  }
276
# test invalid insert_bulk (missing required column)
#
# There should be a rollback, reconnect and the next valid insert_bulk should
# succeed.
  # 'name' is deliberately omitted; any of the three error shapes counts.
  throws_ok {
    $schema->resultset('Artist')->populate([
      {
        charfield => 'foo',
      }
    ]);
  } qr/no value or default|does not allow null|placeholders/i,
# The second pattern is the error from fallback to regular array insert on
# incompatible charset.
# The third is for ::NoBindVars with no syb_has_blk.
  'insert_bulk with missing required column throws error';
292
# now test insert_bulk with IDENTITY_INSERT
  SKIP: {
    # double negative: skip precisely when the storage IS NoBindVars
    skip 'insert_bulk not supported', 3
      unless $storage_type !~ /NoBindVars/i;

    # Supplying explicit artistid values forces IDENTITY_INSERT during the
    # bulk operation.
    lives_ok {
      $schema->resultset('Artist')->populate([
        {
          artistid => 2001,
          name => 'bulk artist 1',
          charfield => 'foo',
        },
        {
          artistid => 2002,
          name => 'bulk artist 2',
          charfield => 'foo',
        },
        {
          artistid => 2003,
          name => 'bulk artist 3',
          charfield => 'foo',
        },
      ]);
    } 'insert_bulk with IDENTITY_INSERT via populate';

    is $bulk_rs->count, 3,
      'correct number inserted via insert_bulk with IDENTITY_INSERT';

    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
      'column set correctly via insert_bulk with IDENTITY_INSERT');

    $bulk_rs->delete;
  }

# test correlated subquery
  my $subq = $schema->resultset('Artist')->search({ artistid => { '>' => 3 } })
    ->get_column('artistid')
    ->as_query;
  my $subq_rs = $schema->resultset('Artist')->search({
    artistid => { -in => $subq }
  });
  is $subq_rs->count, 11, 'correlated subquery';
335
# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
  SKIP: {
    skip 'TEXT/IMAGE support does not work with FreeTDS', 22
      if $schema->storage->using_freetds;

    my $dbh = $schema->storage->_dbh;
    {
      # silence the warning a failing DROP would emit on a fresh database
      local $SIG{__WARN__} = sub {};
      eval { $dbh->do('DROP TABLE bindtype_test') };

      $dbh->do(qq[
        CREATE TABLE bindtype_test 
        (
          id    INT   IDENTITY PRIMARY KEY,
          bytea IMAGE NULL,
          blob  IMAGE NULL,
          clob  TEXT  NULL
        )
      ],{ RaiseError => 1, PrintError => 0 });
    }

    # Binary fixtures: 'small' is bytes 1..127, 'large' is that repeated
    # 1024 times (~127KB), to exercise multi-packet LOB transfers.
    my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
    $binstr{'large'} = $binstr{'small'} x 1024;

    my $maxloblen = length $binstr{'large'};

    # make sure reads return the full LOB rather than a truncation
    if (not $schema->storage->using_freetds) {
      $dbh->{'LongReadLen'} = $maxloblen * 2;
    } else {
      $dbh->do("set textsize ".($maxloblen * 2));
    }

    my $rs = $schema->resultset('BindType');
    my $last_id;

    # Round-trip both LOB column types at both sizes.
    foreach my $type (qw(blob clob)) {
      foreach my $size (qw(small large)) {
        no warnings 'uninitialized';

        my $created;
        lives_ok {
          $created = $rs->create( { $type => $binstr{$size} } )
        } "inserted $size $type without dying";

        $last_id = $created->id if $created;

        lives_and {
          ok($rs->find($last_id)->$type eq $binstr{$size})
        } "verified inserted $size $type";
      }
    }

    $rs->delete;

    # blob insert with explicit PK
    # also a good opportunity to test IDENTITY_INSERT
    lives_ok {
      $rs->create( { id => 1, blob => $binstr{large} } )
    } 'inserted large blob without dying with manual PK';

    lives_and {
      ok($rs->find(1)->blob eq $binstr{large})
    } 'verified inserted large blob with manual PK';

    # try a blob update
    my $new_str = $binstr{large} . 'mtfnpy';

    # check redispatch to storage-specific update when auto-detected storage
    if ($storage_type eq 'DBI::Sybase::ASE') {
      DBICTest::Schema->storage_type('::DBI');
      $schema = get_schema();
    }

    lives_ok {
      $rs->search({ id => 1 })->update({ blob => $new_str })
    } 'updated blob successfully';

    lives_and {
      ok($rs->find(1)->blob eq $new_str)
    } 'verified updated blob';

    # try a blob update with IDENTITY_UPDATE
    lives_and {
      $new_str = $binstr{large} . 'hlagh';
      $rs->find(1)->update({ id => 999, blob => $new_str });
      ok($rs->find(999)->blob eq $new_str);
    } 'verified updated blob with IDENTITY_UPDATE';

    ## try multi-row blob update
    # first insert some blobs
    $new_str = $binstr{large} . 'foo';
    lives_and {
      $rs->delete;
      $rs->create({ blob => $binstr{large} }) for (1..2);
      $rs->update({ blob => $new_str });
      is((grep $_->blob eq $new_str, $rs->all), 2);
    } 'multi-row blob update';

    $rs->delete;

    # now try insert_bulk with blobs and only blobs
    $new_str = $binstr{large} . 'bar';
    lives_ok {
      $rs->populate([
        {
          bytea => 1,
          blob => $binstr{large},
          clob => $new_str,
        },
        {
          bytea => 1,
          blob => $binstr{large},
          clob => $new_str,
        },
      ]);
    } 'insert_bulk with blobs does not die';

    is((grep $_->blob eq $binstr{large}, $rs->all), 2,
      'IMAGE column set correctly via insert_bulk');

    is((grep $_->clob eq $new_str, $rs->all), 2,
      'TEXT column set correctly via insert_bulk');

    # now try insert_bulk with blobs and a non-blob which also happens to be an
    # identity column
    SKIP: {
      skip 'no insert_bulk without placeholders', 4
        if $storage_type =~ /NoBindVars/i;

      $rs->delete;
      $new_str = $binstr{large} . 'bar';
      lives_ok {
        $rs->populate([
          {
            id => 1,
            bytea => 1,
            blob => $binstr{large},
            clob => $new_str,
          },
          {
            id => 2,
            bytea => 1,
            blob => $binstr{large},
            clob => $new_str,
          },
        ]);
      } 'insert_bulk with blobs and explicit identity does NOT die';

      is((grep $_->blob eq $binstr{large}, $rs->all), 2,
        'IMAGE column set correctly via insert_bulk with identity');

      is((grep $_->clob eq $new_str, $rs->all), 2,
        'TEXT column set correctly via insert_bulk with identity');

      is_deeply [ map $_->id, $rs->all ], [ 1,2 ],
        'explicit identities set correctly via insert_bulk with blobs';
    }

    # setting a LOB column back to NULL across multiple rows
    lives_and {
      $rs->delete;
      $rs->create({ blob => $binstr{large} }) for (1..2);
      $rs->update({ blob => undef });
      is((grep !defined($_->blob), $rs->all), 2);
    } 'blob update to NULL';
  }
501
# test MONEY column support (and some other misc. stuff)
  $schema->storage->dbh_do (sub {
      my ($storage, $dbh) = @_;
      eval { $dbh->do("DROP TABLE money_test") };
      $dbh->do(<<'SQL');
CREATE TABLE money_test (
   id INT IDENTITY PRIMARY KEY,
   amount MONEY DEFAULT $999.99 NULL
)
SQL
  });

  my $rs = $schema->resultset('Money');

# test insert with defaults
  # the column-level DEFAULT $999.99 must be applied by an empty insert
  lives_and {
    $rs->create({});
    is((grep $_->amount == 999.99, $rs->all), 1);
  } 'insert with all defaults works';
  $rs->delete;

# test insert transaction when there's an active cursor
  {
    # leave a half-consumed cursor open on the artist table, then insert
    my $artist_rs = $schema->resultset('Artist');
    $artist_rs->first;
    lives_ok {
      my $row = $schema->resultset('Money')->create({ amount => 100 });
      $row->delete;
    } 'inserted a row with an active cursor';
    $ping_count-- if $@; # dbh_do calls ->connected
  }
533
# test insert in an outer transaction when there's an active cursor
  TODO: {
    local $TODO = 'this should work once we have eager cursors';

# clear state, or we get a deadlock on $row->delete
# XXX figure out why this happens
    $schema->storage->disconnect;

    # same active-cursor scenario as above, but inside an explicit txn_do
    lives_ok {
      $schema->txn_do(sub {
        my $artist_rs = $schema->resultset('Artist');
        $artist_rs->first;
        my $row = $schema->resultset('Money')->create({ amount => 100 });
        $row->delete;
      });
    } 'inserted a row with an active cursor in outer txn';
    $ping_count-- if $@; # dbh_do calls ->connected
  }
552
# Now test money values.
  # $row is reused below by the computed-column tests, so declare it here.
  my $row;
  lives_ok {
    $row = $rs->create({ amount => 100 });
  } 'inserted a money value';

  is eval { $rs->find($row->id)->amount }, 100, 'money value round-trip';

  lives_ok {
    $row->update({ amount => 200 });
  } 'updated a money value';

  is eval { $rs->find($row->id)->amount },
    200, 'updated money value round-trip';

  lives_ok {
    $row->update({ amount => undef });
  } 'updated a money value to NULL';

  my $null_amount = eval { $rs->find($row->id)->amount };
  ok(
    # check definedness explicitly: the former "== undef" numified undef
    # to 0 and would also (wrongly) pass for a genuine 0 amount
    ((not defined $null_amount) && (not $@)),
    'updated money value to NULL round-trip'
  );
  diag $@ if $@;
578
# Test computed columns and timestamps
  # Both column types are values the server generates itself; inserts and
  # updates must not try to supply them.
  $schema->storage->dbh_do (sub {
      my ($storage, $dbh) = @_;
      eval { $dbh->do("DROP TABLE computed_column_test") };
      $dbh->do(<<'SQL');
CREATE TABLE computed_column_test (
   id INT IDENTITY PRIMARY KEY,
   a_computed_column AS getdate(),
   a_timestamp timestamp,
   charfield VARCHAR(20) DEFAULT 'foo' 
)
SQL
  });

  # This result class is not part of the default schema; load and register
  # it on demand.
  require DBICTest::Schema::ComputedColumn;
  $schema->register_class(
    ComputedColumn => 'DBICTest::Schema::ComputedColumn'
  );

  ok (($rs = $schema->resultset('ComputedColumn')),
    'got rs for ComputedColumn');

  lives_ok { $row = $rs->create({}) }
    'empty insert for a table with computed columns survived';

  lives_ok {
    $row->update({ charfield => 'bar' })
  } 'update of a table with computed columns survived';
}
608
# The instrumented _ping must never have fired during the whole run.
is $ping_count, 0, 'no pings';

# clean up our mess
END {
  # $schema may never have connected (e.g. skip_all), hence the evals
  if (my $dbh = eval { $schema->storage->_dbh }) {
    eval { $dbh->do("DROP TABLE $_") }
      for qw/artist bindtype_test money_test computed_column_test/;
  }
}
618