/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at http://curl.haxx.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ***************************************************************************/

#include "setup.h"

#include "strtoofft.h"
#include "strequal.h"
#include "rawstr.h"

#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NET_IF_H
#include <net/if.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_SIGNAL_H
#include <signal.h>
#endif

#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif

#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#ifndef HAVE_SOCKET
#error "We can't compile without socket() support!"
#endif

#include "urldata.h"
#include <curl/curl.h>
#include "netrc.h"

#include "content_encoding.h"
#include "hostip.h"
#include "transfer.h"
#include "sendf.h"
#include "speedcheck.h"
#include "progress.h"
#include "http.h"
#include "url.h"
#include "getinfo.h"
#include "sslgen.h"
#include "http_digest.h"
#include "curl_ntlm.h"
#include "http_negotiate.h"
#include "share.h"
#include "curl_memory.h"
#include "select.h"
#include "multiif.h"
#include "connect.h"
#include "non-ascii.h"

#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>

/* The last #include file should be: */
#include "memdebug.h"

/* how long to wait for an Expect: 100-continue response, in milliseconds */
#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */

/*
 * Curl_fillreadbuffer()
 *
 * This function will call the read callback to fill our buffer with data
 * to upload.
 *
 * 'bytes' is the size of the target buffer (data->req.upload_fromhere).
 * On success, *nreadp holds the number of bytes made available in that
 * buffer, including any chunked transfer-encoding framing added here.
 *
 * Returns CURLE_ABORTED_BY_CALLBACK if the callback returned
 * CURL_READFUNC_ABORT, CURLE_READ_ERROR if it claimed to have provided
 * more bytes than the buffer can hold, CURLE_OK otherwise (including the
 * paused case, which reports *nreadp == 0).
 */
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
{
  struct SessionHandle *data = conn->data;
  size_t buffersize = (size_t)bytes;
  int nread;
#ifdef CURL_DOES_CONVERSIONS
  bool sending_http_headers = FALSE;

  if((conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)) &&
     (data->state.proto.http->sending == HTTPSEND_REQUEST)) {
    /* We're sending the HTTP request headers, not the data.
       Remember that so we don't re-translate them into garbage. */
    sending_http_headers = TRUE;
  }
#endif

  if(data->req.upload_chunky) {
    /* if chunked Transfer-Encoding: reserve room for the chunk framing in
       front of and after the payload */
    buffersize -= (8 + 2 + 2);            /* 32bit hex + CRLF + CRLF */
    data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
  }

  /* this function returns a size_t, so we typecast to int to prevent warnings
     with picky compilers */
  nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
                                buffersize, conn->fread_in);

  if(nread == CURL_READFUNC_ABORT) {
    failf(data, "operation aborted by callback");
    *nreadp = 0;
    return CURLE_ABORTED_BY_CALLBACK;
  }
  else if(nread == CURL_READFUNC_PAUSE) {
    struct SingleRequest *k = &data->req;
    /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
    k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
    if(data->req.upload_chunky) {
      /* Back out the preallocation done above */
      data->req.upload_fromhere -= (8 + 2);
    }
    *nreadp = 0;
    return CURLE_OK; /* nothing was read */
  }
  else if((size_t)nread > buffersize) {
    /* the read function returned a too large value */
    *nreadp = 0;
    failf(data, "read function returned funny value");
    return CURLE_READ_ERROR;
  }

  if(!data->req.forbidchunk && data->req.upload_chunky) {
    /* if chunked Transfer-Encoding
     * build chunk:
     *
     * <HEX SIZE> CRLF
     * <DATA> CRLF
     */
    /* On non-ASCII platforms the <DATA> may or may not be
       translated based on set.prefer_ascii while the protocol
       portion must always be translated to the network encoding.
       To further complicate matters, line end conversion might be
       done later on, so we need to prevent CRLFs from becoming
       CRCRLFs if that's the case. To do this we use bare LFs
       here, knowing they'll become CRLFs later on.
     */

    char hexbuffer[11]; /* big enough for 8 hex digits + 2-char EOL + NUL */
    const char *endofline_native;
    const char *endofline_network;
    int hexlen;

    if(
#ifdef CURL_DO_LINEEND_CONV
       (data->set.prefer_ascii) ||
#endif
       (data->set.crlf)) {
      /* \n will become \r\n later on */
      endofline_native = "\n";
      endofline_network = "\x0a";
    }
    else {
      endofline_native = "\r\n";
      endofline_network = "\x0d\x0a";
    }
    hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
                      "%x%s", nread, endofline_native);

    /* move buffer pointer back over the reserved framing space */
    data->req.upload_fromhere -= hexlen;
    nread += hexlen;

    /* copy the prefix to the buffer, leaving out the NUL */
    memcpy(data->req.upload_fromhere, hexbuffer, hexlen);

    /* always append ASCII CRLF to the data */
    memcpy(data->req.upload_fromhere + nread,
           endofline_network,
           strlen(endofline_network));

#ifdef CURL_DOES_CONVERSIONS
    CURLcode res;
    int length;
    if(data->set.prefer_ascii) {
      /* translate the protocol and data */
      length = nread;
    }
    else {
      /* just translate the protocol portion */
      length = strlen(hexbuffer);
    }
    res = Curl_convert_to_network(data, data->req.upload_fromhere, length);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(res)
      return(res);
#endif /* CURL_DOES_CONVERSIONS */

    if((nread - hexlen) == 0)
      /* the callback provided zero payload bytes: this is the terminating
         zero-size chunk, so mark this as done once this chunk is
         transferred */
      data->req.upload_done = TRUE;

    nread+=(int)strlen(endofline_native); /* for the added end of line */
  }
#ifdef CURL_DOES_CONVERSIONS
  else if((data->set.prefer_ascii) && (!sending_http_headers)) {
    CURLcode res;
    res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(res != CURLE_OK)
      return(res);
  }
#endif /* CURL_DOES_CONVERSIONS */

  *nreadp = nread;

  return CURLE_OK;
}


/*
 * Curl_readrewind()
 * rewinds the read stream. This is typically used for HTTP
 * POST/PUT with multi-pass authentication when a sending was denied and a
 * resend is necessary.
 *
 * Tries, in order: the app's CURLOPT_SEEKFUNCTION callback, the
 * CURLOPT_IOCTLFUNCTION callback, and finally a plain fseek() when the
 * read callback is known to be stdio fread(). Returns
 * CURLE_SEND_FAIL_REWIND if none of these can rewind the stream.
 */
CURLcode Curl_readrewind(struct connectdata *conn)
{
  struct SessionHandle *data = conn->data;

  conn->bits.rewindaftersend = FALSE; /* we rewind now */

  /* explicitly switch off sending data on this connection now since we are
     about to restart a new transfer and thus we want to avoid inadvertently
     sending more data on the existing connection until the next transfer
     starts */
  data->req.keepon &= ~KEEP_SEND;

  /* We have sent away data. If not using CURLOPT_POSTFIELDS or
     CURLOPT_HTTPPOST, call app to rewind
  */
  if(data->set.postfields ||
     (data->set.httpreq == HTTPREQ_POST_FORM))
    ; /* do nothing: the data is held in memory and needs no rewinding */
  else {
    if(data->set.seek_func) {
      int err;

      err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
      if(err) {
        failf(data, "seek callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else if(data->set.ioctl_func) {
      curlioerr err;

      err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
                                   data->set.ioctl_client);
      infof(data, "the ioctl callback returned %d\n", (int)err);

      if(err) {
        /* FIXME: convert to a human readable error message */
        failf(data, "ioctl callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else {
      /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
         given FILE * stream and we can actually attempt to rewind that
         ourselves with fseek() */
      if(data->set.fread_func == (curl_read_callback)fread) {
        if(-1 != fseek(data->set.in, 0, SEEK_SET))
          /* successful rewind */
          return CURLE_OK;
      }

      /* no callback set or failure above, makes us fail at once */
      failf(data, "necessary data rewind wasn't possible");
      return CURLE_SEND_FAIL_REWIND;
    }
  }
  return CURLE_OK;
}

/*
 * Return non-zero when we should attempt another read even without the
 * socket signalling readability (the transport may buffer internally).
 */
static int data_pending(const struct connectdata *conn)
{
  /* in the case of libssh2, we can never be really sure that we have emptied
     its internal buffers so we MUST always try until we get EAGAIN back */
  return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
    Curl_ssl_data_pending(conn, FIRSTSOCKET);
}

/*
 * Push 'thismuch' already-consumed bytes back into the connection's master
 * buffer so the next read pass sees them again (used when pipelining).
 * Caller must guarantee thismuch <= conn->read_pos.
 */
static void read_rewind(struct connectdata *conn,
                        size_t thismuch)
{
  DEBUGASSERT(conn->read_pos >= thismuch);

  conn->read_pos -= thismuch;
  conn->bits.stream_was_rewound = TRUE;

#ifdef DEBUGBUILD
  {
    char buf[512 + 1];
    size_t show;

    show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
    if(conn->master_buffer) {
      memcpy(buf, conn->master_buffer + conn->read_pos, show);
      buf[show] = '\0';
    }
    else {
      buf[0] = '\0';
    }

    DEBUGF(infof(conn->data,
                 "Buffer after stream rewind (read_pos = %zu): [%s]",
                 conn->read_pos, buf));
  }
#endif
}

/*
 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
 * remote document with the time provided by CURLOPT_TIMEVAL
 *
 * Returns TRUE if the condition is met (or no condition applies, i.e.
 * either timestamp is 0); FALSE means the transfer should be skipped and
 * data->info.timecond has been set.
 */
bool Curl_meets_timecondition(struct SessionHandle *data, time_t timeofdoc)
{
  if((timeofdoc == 0) || (data->set.timevalue == 0))
    return TRUE;

  switch(data->set.timecondition) {
  case CURL_TIMECOND_IFMODSINCE:
  default:
    if(timeofdoc <= data->set.timevalue) {
      infof(data,
            "The requested document is not new enough\n");
      data->info.timecond = TRUE;
      return FALSE;
    }
    break;
  case CURL_TIMECOND_IFUNMODSINCE:
    if(timeofdoc >= data->set.timevalue) {
      infof(data,
            "The requested document is not old enough\n");
      data->info.timecond = TRUE;
      return FALSE;
    }
    break;
  }

  return TRUE;
}

/*
 * Go ahead and do a read if we have a readable socket or if
 * the stream was rewound (in which case we have data in a
 * buffer)
 */
/*
 * readwrite_data() drains the socket in a loop: it reads into k->buf,
 * lets the protocol handler and HTTP header parser process the bytes,
 * decodes chunked encoding when needed, and delivers body data to the
 * client write callback. Sets *didwhat's KEEP_RECV bit when it did any
 * receiving, and *done when the transfer finished during this call.
 */
static CURLcode readwrite_data(struct SessionHandle *data,
                               struct connectdata *conn,
                               struct SingleRequest *k,
                               int *didwhat, bool *done)
{
  CURLcode result = CURLE_OK;
  ssize_t nread; /* number of bytes read */
  size_t excess = 0; /* excess bytes read */
  bool is_empty_data = FALSE;
  bool readmore = FALSE; /* used by RTP to signal for more data */

  *done = FALSE;

  /* This is where we loop until we have read everything there is to
     read or we get a CURLE_AGAIN */
  do {
    size_t buffersize = data->set.buffer_size?
      data->set.buffer_size : BUFSIZE;
    size_t bytestoread = buffersize;

    if(k->size != -1 && !k->header) {
      /* make sure we don't read "too much" if we can help it since we
         might be pipelining and then someone else might want to read what
         follows! */
      curl_off_t totalleft = k->size - k->bytecount;
      if(totalleft < (curl_off_t)bytestoread)
        bytestoread = (size_t)totalleft;
    }

    if(bytestoread) {
      /* receive data from the network! */
      result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);

      /* read would've blocked */
      if(CURLE_AGAIN == result)
        break; /* get out of loop */

      if(result>0)
        return result;
    }
    else {
      /* read nothing but since we wanted nothing we consider this an OK
         situation to proceed from */
      nread = 0;
    }

    if((k->bytecount == 0) && (k->writebytecount == 0)) {
      /* first traffic on this request: start the transfer timer */
      Curl_pgrsTime(data, TIMER_STARTTRANSFER);
      if(k->exp100 > EXP100_SEND_DATA)
        /* set time stamp to compare with when waiting for the 100 */
        k->start100 = Curl_tvnow();
    }

    *didwhat |= KEEP_RECV;
    /* indicates data of zero size, i.e. empty file */
    is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;

    /* NUL terminate, allowing string ops to be used */
    if(0 < nread || is_empty_data) {
      k->buf[nread] = 0;
    }
    else if(0 >= nread) {
      /* if we receive 0 or less here, the server closed the connection
         and we bail out from this! */
      DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
      k->keepon &= ~KEEP_RECV;
      break;
    }

    /* Default buffer to use when we write the buffer, it may be changed
       in the flow below before the actual storing is done. */
    k->str = k->buf;

    /* let the protocol handler pre-process what we received, if it wants */
    if(conn->handler->readwrite) {
      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;
      if(readmore)
        break;
    }

#ifndef CURL_DISABLE_HTTP
    /* Since this is a two-state thing, we check if we are parsing
       headers at the moment or not. */
    if(k->header) {
      /* we are in parse-the-header-mode */
      bool stop_reading = FALSE;
      result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
      if(result)
        return result;

      if(conn->handler->readwrite &&
         (k->maxdownload <= 0 && nread > 0)) {
        result = conn->handler->readwrite(data, conn, &nread, &readmore);
        if(result)
          return result;
        if(readmore)
          break;
      }

      if(stop_reading) {
        /* We've stopped dealing with input, get out of the do-while loop */

        if(nread > 0) {
          /* leftover bytes after the headers: only push them back if
             pipelining, otherwise just report them */
          if(conn->data->multi && Curl_multi_canPipeline(conn->data->multi)) {
            infof(data,
                  "Rewinding stream by : %zd"
                  " bytes on url %s (zero-length body)\n",
                  nread, data->state.path);
            read_rewind(conn, (size_t)nread);
          }
          else {
            infof(data,
                  "Excess found in a non pipelined read:"
                  " excess = %zd"
                  " url = %s (zero-length body)\n",
                  nread, data->state.path);
          }
        }

        break;
      }
    }
#endif /* CURL_DISABLE_HTTP */


    /* This is not an 'else if' since it may be a rest from the header
       parsing, where the beginning of the buffer is headers and the end
       is non-headers. */
    if(k->str && !k->header && (nread > 0 || is_empty_data)) {

#ifndef CURL_DISABLE_HTTP
      if(0 == k->bodywrites && !is_empty_data) {
        /* These checks are only made the first time we are about to
           write a piece of the body */
        if(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)) {
          /* HTTP-only checks */

          if(data->req.newurl) {
            if(conn->bits.close) {
              /* Abort after the headers if "follow Location" is set
                 and we're set to close anyway. */
              k->keepon &= ~KEEP_RECV;
              *done = TRUE;
              return CURLE_OK;
            }
            /* We have a new url to load, but since we want to be able
               to re-use this connection properly, we read the full
               response in "ignore more" */
            k->ignorebody = TRUE;
            infof(data, "Ignoring the response-body\n");
          }
          if(data->state.resume_from && !k->content_range &&
             (data->set.httpreq==HTTPREQ_GET) &&
             !k->ignorebody) {
            /* we wanted to resume a download, although the server doesn't
             * seem to support this and we did this with a GET (if it
             * wasn't a GET we did a POST or PUT resume) */
            failf(data, "HTTP server doesn't seem to support "
                  "byte ranges. Cannot resume.");
            return CURLE_RANGE_ERROR;
          }

          if(data->set.timecondition && !data->state.range) {
            /* A time condition has been set AND no ranges have been
               requested. This seems to be what chapter 13.3.4 of
               RFC 2616 defines to be the correct action for a
               HTTP/1.1 client */

            if(!Curl_meets_timecondition(data, k->timeofdoc)) {
              *done = TRUE;
              /* we abort the transfer before it is completed == we ruin the
                 re-use ability. Close the connection */
              conn->bits.close = TRUE;
              return CURLE_OK;
            }
          } /* we have a time condition */

        } /* this is HTTP or RTSP */
      } /* this is the first time we write a body part */
#endif /* CURL_DISABLE_HTTP */

      k->bodywrites++;

      /* pass data to the debug function before it gets "dechunked" */
      if(data->set.verbose) {
        if(k->badheader) {
          Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
                     (size_t)k->hbuflen, conn);
          if(k->badheader == HEADER_PARTHEADER)
            Curl_debug(data, CURLINFO_DATA_IN,
                       k->str, (size_t)nread, conn);
        }
        else
          Curl_debug(data, CURLINFO_DATA_IN,
                     k->str, (size_t)nread, conn);
      }

#ifndef CURL_DISABLE_HTTP
      if(k->chunk) {
        /*
         * Here comes a chunked transfer flying and we need to decode this
         * properly. While the name says read, this function both reads
         * and writes away the data. The returned 'nread' holds the number
         * of actual data it wrote to the client.
         */

        CHUNKcode res =
          Curl_httpchunk_read(conn, k->str, nread, &nread);

        if(CHUNKE_OK < res) {
          if(CHUNKE_WRITE_ERROR == res) {
            failf(data, "Failed writing data");
            return CURLE_WRITE_ERROR;
          }
          failf(data, "Problem (%d) in the Chunked-Encoded data", (int)res);
          return CURLE_RECV_ERROR;
        }
        else if(CHUNKE_STOP == res) {
          size_t dataleft;
          /* we're done reading chunks! */
          k->keepon &= ~KEEP_RECV; /* read no more */

          /* There are now possibly N number of bytes at the end of the
             str buffer that weren't written to the client.

             We DO care about this data if we are pipelining.
             Push it back to be read on the next pass. */

          dataleft = conn->chunk.dataleft;
          if(dataleft != 0) {
            infof(conn->data, "Leftovers after chunking: %zu bytes", dataleft);
            if(conn->data->multi &&
               Curl_multi_canPipeline(conn->data->multi)) {
              /* only attempt the rewind if we truly are pipelining */
              infof(conn->data, "Rewinding %zu bytes\n",dataleft);
              read_rewind(conn, dataleft);
            }
          }
        }
        /* If it returned OK, we just keep going */
      }
#endif   /* CURL_DISABLE_HTTP */

      /* Account for body content stored in the header buffer */
      if(k->badheader && !k->ignorebody) {
        DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
                     k->hbuflen));
        k->bytecount += k->hbuflen;
      }

      if((-1 != k->maxdownload) &&
         (k->bytecount + nread >= k->maxdownload)) {
        /* we got more than maxdownload allows: clamp nread and possibly
           rewind the surplus for a pipelined follower */

        excess = (size_t)(k->bytecount + nread - k->maxdownload);
        if(excess > 0 && !k->ignorebody) {
          if(conn->data->multi && Curl_multi_canPipeline(conn->data->multi)) {
            /* The 'excess' amount below can't be more than BUFSIZE which
               always will fit in a size_t */
            infof(data,
                  "Rewinding stream by : %zu"
                  " bytes on url %s (size = %" FORMAT_OFF_T
                  ", maxdownload = %" FORMAT_OFF_T
                  ", bytecount = %" FORMAT_OFF_T ", nread = %zd)\n",
                  excess, data->state.path,
                  k->size, k->maxdownload, k->bytecount, nread);
            read_rewind(conn, excess);
          }
          else {
            infof(data,
                  "Excess found in a non pipelined read:"
                  " excess = %zu"
                  ", size = %" FORMAT_OFF_T
                  ", maxdownload = %" FORMAT_OFF_T
                  ", bytecount = %" FORMAT_OFF_T "\n",
                  excess, k->size, k->maxdownload, k->bytecount);
          }
        }

        nread = (ssize_t) (k->maxdownload - k->bytecount);
        if(nread < 0 ) /* this should be unusual */
          nread = 0;

        k->keepon &= ~KEEP_RECV; /* we're done reading */
      }

      k->bytecount += nread;

      Curl_pgrsSetDownloadCounter(data, k->bytecount);

      if(!k->chunk && (nread || k->badheader || is_empty_data)) {
        /* If this is chunky transfer, it was already written */

        if(k->badheader && !k->ignorebody) {
          /* we parsed a piece of data wrongly assuming it was a header
             and now we output it as body instead */

          /* Don't let excess data pollute body writes */
          if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       k->hbuflen);
          else
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       (size_t)k->maxdownload);

          if(result)
            return result;
        }
        if(k->badheader < HEADER_ALLBAD) {
          /* This switch handles various content encodings. If there's an
             error here, be sure to check over the almost identical code
             in http_chunks.c.
             Make sure that ALL_CONTENT_ENCODINGS contains all the
             encodings handled here. */
#ifdef HAVE_LIBZ
          switch (conn->data->set.http_ce_skip ?
                  IDENTITY : k->auto_decoding) {
          case IDENTITY:
#endif
            /* This is the default when the server sends no
               Content-Encoding header. See Curl_readwrite_init; the
               memset() call initializes k->auto_decoding to zero. */
            if(!k->ignorebody) {

#ifndef CURL_DISABLE_POP3
              if(conn->handler->protocol&CURLPROTO_POP3)
                result = Curl_pop3_write(conn, k->str, nread);
              else
#endif /* CURL_DISABLE_POP3 */

                result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
                                           nread);
            }
#ifdef HAVE_LIBZ
            break;

          case DEFLATE:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_deflate_write(conn, k, nread);
            break;

          case GZIP:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_gzip_write(conn, k, nread);
            break;

          case COMPRESS:
          default:
            failf (data, "Unrecognized content encoding type. "
                   "libcurl understands `identity', `deflate' and `gzip' "
                   "content encodings.");
            result = CURLE_BAD_CONTENT_ENCODING;
            break;
          }
#endif
        }
        k->badheader = HEADER_NORMAL; /* taken care of now */

        if(result)
          return result;
      }

    } /* if(! header and data to read ) */

    if(conn->handler->readwrite &&
       (excess > 0 && !conn->bits.stream_was_rewound)) {
      /* Parse the excess data */
      k->str += nread;
      nread = (ssize_t)excess;

      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;

      if(readmore)
        k->keepon |= KEEP_RECV; /* we're not done reading */
      break;
    }

    if(is_empty_data) {
      /* if we received nothing, the server closed the connection and we
         are done */
      k->keepon &= ~KEEP_RECV;
    }

  } while(data_pending(conn));

  if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
     conn->bits.close ) {
    /* When we've read the entire thing and the close bit is set, the server
       may now close the connection. If there's now any kind of sending going
       on from our side, we need to stop that immediately. */
    infof(data, "we are done reading and this is set to close, stop send\n");
    k->keepon &= ~KEEP_SEND; /* no writing anymore either */
  }

  return CURLE_OK;
}

/*
 * Send data to upload to the server, when the socket is writable.
 */
/*
 * readwrite_upload() fills the upload buffer (via Curl_fillreadbuffer),
 * optionally rewrites line endings or SMTP dot-stuffing, and sends the
 * buffer to the socket with Curl_write(). A partially-sent buffer is kept
 * in data->req.upload_present/upload_fromhere for the next call. Sets
 * *didwhat's KEEP_SEND bit when it did any sending.
 */
static CURLcode readwrite_upload(struct SessionHandle *data,
                                 struct connectdata *conn,
                                 struct SingleRequest *k,
                                 int *didwhat)
{
  ssize_t i, si;
  ssize_t bytes_written;
  CURLcode result;
  ssize_t nread; /* number of bytes read */
  bool sending_http_headers = FALSE;

  if((k->bytecount == 0) && (k->writebytecount == 0))
    Curl_pgrsTime(data, TIMER_STARTTRANSFER);

  *didwhat |= KEEP_SEND;

  /*
   * We loop here to do the READ and SEND loop until we run out of
   * data to send or until we get EWOULDBLOCK back
   *
   * FIXME: above comment is misleading. Currently no looping is
   * actually done in do-while loop below.
   */
  do {

    /* only read more data if there's no upload data already
       present in the upload buffer */
    if(0 == data->req.upload_present) {
      /* init the "upload from here" pointer */
      data->req.upload_fromhere = k->uploadbuf;

      if(!k->upload_done) {
        /* HTTP pollution, this should be written nicer to become more
           protocol agnostic. */
        int fillcount;

        if((k->exp100 == EXP100_SENDING_REQUEST) &&
           (data->state.proto.http->sending == HTTPSEND_BODY)) {
          /* If this call is to send body data, we must take some action:
             We have sent off the full HTTP 1.1 request, and we shall now
             go into the Expect: 100 state and await such a header */
          k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
          k->keepon &= ~KEEP_SEND;         /* disable writing */
          k->start100 = Curl_tvnow();      /* timeout count starts now */
          *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */

          /* set a timeout for the multi interface */
          Curl_expire(data, CURL_TIMEOUT_EXPECT_100);
          break;
        }

        if(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)) {
          if(data->state.proto.http->sending == HTTPSEND_REQUEST)
            /* We're sending the HTTP request headers, not the data.
               Remember that so we don't change the line endings. */
            sending_http_headers = TRUE;
          else
            sending_http_headers = FALSE;
        }

        result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
        if(result)
          return result;

        nread = (ssize_t)fillcount;
      }
      else
        nread = 0; /* we're done uploading/reading */

      if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
        /* this is a paused transfer */
        break;
      }
      else if(nread<=0) {
        /* done */
        k->keepon &= ~KEEP_SEND; /* we're done writing */

        if(conn->bits.rewindaftersend) {
          result = Curl_readrewind(conn);
          if(result)
            return result;
        }
        break;
      }

      /* store number of bytes available for upload */
      data->req.upload_present = nread;

#ifndef CURL_DISABLE_SMTP
      if(conn->handler->protocol & CURLPROTO_SMTP) {
        result = Curl_smtp_escape_eob(conn, nread);
        if(result)
          return result;
      }
      else
#endif /* CURL_DISABLE_SMTP */

      /* convert LF to CRLF if so asked */
      if((!sending_http_headers) && (
#ifdef CURL_DO_LINEEND_CONV
         /* always convert if we're FTPing in ASCII mode */
         (data->set.prefer_ascii) ||
#endif
         (data->set.crlf))) {
        /* lazily allocate the scratch buffer; 2*BUFSIZE covers the worst
           case where every input byte is an LF that expands to CRLF */
        if(data->state.scratch == NULL)
          data->state.scratch = malloc(2*BUFSIZE);
        if(data->state.scratch == NULL) {
          failf (data, "Failed to alloc scratch buffer!");
          return CURLE_OUT_OF_MEMORY;
        }
        /*
         * ASCII/EBCDIC Note: This is presumably a text (not binary)
         * transfer so the data should already be in ASCII.
         * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
         * must be used instead of the escape sequences \r & \n.
         */
        for(i = 0, si = 0; i < nread; i++, si++) {
          if(data->req.upload_fromhere[i] == 0x0a) {
            data->state.scratch[si++] = 0x0d;
            data->state.scratch[si] = 0x0a;
            if(!data->set.crlf) {
              /* we're here only because FTP is in ASCII mode...
                 bump infilesize for the LF we just added */
              data->set.infilesize++;
            }
          }
          else
            data->state.scratch[si] = data->req.upload_fromhere[i];
        }
        if(si != nread) {
          /* only perform the special operation if we really did replace
             anything */
          nread = si;

          /* upload from the new (replaced) buffer instead */
          data->req.upload_fromhere = data->state.scratch;

          /* set the new amount too */
          data->req.upload_present = nread;
        }
      }
    } /* if 0 == data->req.upload_present */
    else {
      /* We have a partial buffer left from a previous "round". Use
         that instead of reading more data */
    }

    /* write to socket (send away data) */
    result = Curl_write(conn,
                        conn->writesockfd,         /* socket to send to */
                        data->req.upload_fromhere, /* buffer pointer */
                        data->req.upload_present,  /* buffer size */
                        &bytes_written);           /* actually sent */

    if(result)
      return result;

    if(data->set.verbose)
      /* show the data before we change the pointer upload_fromhere */
      Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
                 (size_t)bytes_written, conn);

    k->writebytecount += bytes_written;

    if(k->writebytecount == data->set.infilesize) {
      /* we have sent all data we were supposed to */
      k->upload_done = TRUE;
      infof(data, "We are completely uploaded and fine\n");
    }

    if(data->req.upload_present != bytes_written) {
      /* we only wrote a part of the buffer (if anything), deal with it! */

      /* store the amount of bytes left in the buffer to write */
      data->req.upload_present -= bytes_written;

      /* advance the pointer where to find the buffer when the next send
         is to happen */
      data->req.upload_fromhere += bytes_written;
    }
    else {
      /* we've uploaded that buffer now */
      data->req.upload_fromhere = k->uploadbuf;
      data->req.upload_present = 0; /* no more bytes left */

      if(k->upload_done) {
        /* switch off writing, we're done! */
        k->keepon &= ~KEEP_SEND; /* we're done writing */
      }
    }

    Curl_pgrsSetUploadCounter(data, k->writebytecount);

  } WHILE_FALSE; /* just to break out from! */

  return CURLE_OK;
}

/*
 * Curl_readwrite() is the low-level function to be called when data is to
 * be read and written to/from the connection.
 */
CURLcode Curl_readwrite(struct connectdata *conn,
                        bool *done)
{
  struct SessionHandle *data = conn->data;
  struct SingleRequest *k = &data->req;
  CURLcode result;
  int didwhat=0;

  curl_socket_t fd_read;
  curl_socket_t fd_write;
  /* cselect_bits may have been set by the app/multi code to tell us the
     socket state without an extra poll */
  int select_res = conn->cselect_bits;

  conn->cselect_bits = 0;

  /* only use the proper socket if the *_HOLD bit is not set simultaneously as
     then we are in rate limiting state in that transfer direction */

  if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
    fd_read = conn->sockfd;
  else
    fd_read = CURL_SOCKET_BAD;

  if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
    fd_write = conn->writesockfd;
  else
    fd_write = CURL_SOCKET_BAD;

  if(!select_res) /* Call for select()/poll() only, if read/write/error
                     status is not known. */
    select_res = Curl_socket_ready(fd_read, fd_write, 0);

  if(select_res == CURL_CSELECT_ERR) {
    failf(data, "select/poll returned error");
    return CURLE_SEND_ERROR;
  }

  /* We go ahead and do a read if we have a readable socket or if
     the stream was rewound (in which case we have data in a
     buffer) */
  if((k->keepon & KEEP_RECV) &&
     ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {

    result = readwrite_data(data, conn, k, &didwhat, done);
    if(result || *done)
      return result;
  }
  else if(k->keepon & KEEP_RECV) {
    DEBUGF(infof(data, "additional stuff not fine %s:%d: %d %d\n",
                 __FILE__, __LINE__,
                 select_res & CURL_CSELECT_IN,
                 conn->bits.stream_was_rewound));
  }

  /* If we still have writing to do, we check if we have a writable socket. */
  if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
    /* write */

    result = readwrite_upload(data, conn, k, &didwhat);
    if(result)
      return result;
  }

  k->now = Curl_tvnow();
  if(didwhat) {
    /* Update read/write counters */
    if(k->bytecountp)
      *k->bytecountp = k->bytecount; /* read count */
    if(k->writebytecountp)
      *k->writebytecountp = k->writebytecount; /* write count */
  }
  else {
    /* no read no write, this is a timeout? */
    if(k->exp100 == EXP100_AWAITING_CONTINUE) {
      /* This should allow some time for the header to arrive, but only a
         very short time as otherwise it'll be too much wasted time too
         often. */

      /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":

         Therefore, when a client sends this header field to an origin server
         (possibly via a proxy) from which it has never seen a 100 (Continue)
         status, the client SHOULD NOT wait for an indefinite period before
         sending the request body.

      */

      long ms = Curl_tvdiff(k->now, k->start100);
      if(ms > CURL_TIMEOUT_EXPECT_100) {
        /* we've waited long enough, continue anyway */
        k->exp100 = EXP100_SEND_DATA;
        k->keepon |= KEEP_SEND;
        infof(data, "Done waiting for 100-continue\n");
      }
    }
  }

  if(Curl_pgrsUpdate(conn))
    result = CURLE_ABORTED_BY_CALLBACK;
  else
    result = Curl_speedcheck(data, k->now);
  if(result)
    return result;

  if(k->keepon) {
    /* transfer still in progress: check the overall operation timeout */
    if(0 > Curl_timeleft(data, &k->now, FALSE)) {
      if(k->size != -1) {
        failf(data, "Operation timed out after %ld milliseconds with %"
              FORMAT_OFF_T " out of %" FORMAT_OFF_T " bytes received",
              Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
              k->size);
      }
      else {
        failf(data, "Operation timed out after %ld milliseconds with %"
              FORMAT_OFF_T " bytes received",
              Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
      }
      return CURLE_OPERATION_TIMEDOUT;
    }
  }
  else {
    /*
     * The transfer has been performed. Just make some general checks before
     * returning.
     */

    if(!(data->set.opt_no_body) && (k->size != -1) &&
       (k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
       /* Most FTP servers don't adjust their file SIZE response for CRLFs,
          so we'll check to see if the discrepancy can be explained
          by the number of CRLFs we've changed to LFs.
       */
       (k->bytecount != (k->size + data->state.crlf_conversions)) &&
#endif /* CURL_DO_LINEEND_CONV */
       !data->req.newurl) {
      failf(data, "transfer closed with %" FORMAT_OFF_T
            " bytes remaining to read",
            k->size - k->bytecount);
      return CURLE_PARTIAL_FILE;
    }
    else if(!(data->set.opt_no_body) &&
            k->chunk &&
            (conn->chunk.state != CHUNK_STOP)) {
      /*
       * In chunked mode, return an error if the connection is closed prior to
       * the empty (terminating) chunk is read.
       *
       * The condition above used to check for
       * conn->proto.http->chunk.datasize != 0 which is true after reading
       * *any* chunk, not just the empty chunk.
       *
       */
      failf(data, "transfer closed with outstanding read data remaining");
      return CURLE_PARTIAL_FILE;
    }
    if(Curl_pgrsUpdate(conn))
      return CURLE_ABORTED_BY_CALLBACK;
  }

  /* Now update the "done" boolean we return */
  *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
                            KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;

  return CURLE_OK;
}

/*
 * Curl_single_getsock() gets called by the multi interface code when the app
 * has requested to get the sockets for the current connection. This function
 * will then be called once for every connection that the multi interface
 * keeps track of. This function will only be called for connections that are
 * in the proper state to have this information available.
1158 */ 1159int Curl_single_getsock(const struct connectdata *conn, 1160 curl_socket_t *sock, /* points to numsocks number 1161 of sockets */ 1162 int numsocks) 1163{ 1164 const struct SessionHandle *data = conn->data; 1165 int bitmap = GETSOCK_BLANK; 1166 unsigned sockindex = 0; 1167 1168 if(conn->handler->perform_getsock) 1169 return conn->handler->perform_getsock(conn, sock, numsocks); 1170 1171 if(numsocks < 2) 1172 /* simple check but we might need two slots */ 1173 return GETSOCK_BLANK; 1174 1175 /* don't include HOLD and PAUSE connections */ 1176 if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) { 1177 1178 DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD); 1179 1180 bitmap |= GETSOCK_READSOCK(sockindex); 1181 sock[sockindex] = conn->sockfd; 1182 } 1183 1184 /* don't include HOLD and PAUSE connections */ 1185 if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) { 1186 1187 if((conn->sockfd != conn->writesockfd) || 1188 !(data->req.keepon & KEEP_RECV)) { 1189 /* only if they are not the same socket or we didn't have a readable 1190 one, we increase index */ 1191 if(data->req.keepon & KEEP_RECV) 1192 sockindex++; /* increase index if we need two entries */ 1193 1194 DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD); 1195 1196 sock[sockindex] = conn->writesockfd; 1197 } 1198 1199 bitmap |= GETSOCK_WRITESOCK(sockindex); 1200 } 1201 1202 return bitmap; 1203} 1204 1205/* 1206 * Determine optimum sleep time based on configured rate, current rate, 1207 * and packet size. 1208 * Returns value in milliseconds. 1209 * 1210 * The basic idea is to adjust the desired rate up/down in this method 1211 * based on whether we are running too slow or too fast. Then, calculate 1212 * how many milliseconds to wait for the next packet to achieve this new 1213 * rate. 
1214 */ 1215long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps, 1216 int pkt_size) 1217{ 1218 curl_off_t min_sleep = 0; 1219 curl_off_t rv = 0; 1220 1221 if(rate_bps == 0) 1222 return 0; 1223 1224 /* If running faster than about .1% of the desired speed, slow 1225 * us down a bit. Use shift instead of division as the 0.1% 1226 * cutoff is arbitrary anyway. 1227 */ 1228 if(cur_rate_bps > (rate_bps + (rate_bps >> 10))) { 1229 /* running too fast, decrease target rate by 1/64th of rate */ 1230 rate_bps -= rate_bps >> 6; 1231 min_sleep = 1; 1232 } 1233 else if(cur_rate_bps < (rate_bps - (rate_bps >> 10))) { 1234 /* running too slow, increase target rate by 1/64th of rate */ 1235 rate_bps += rate_bps >> 6; 1236 } 1237 1238 /* Determine number of milliseconds to wait until we do 1239 * the next packet at the adjusted rate. We should wait 1240 * longer when using larger packets, for instance. 1241 */ 1242 rv = ((curl_off_t)((pkt_size * 8) * 1000) / rate_bps); 1243 1244 /* Catch rounding errors and always slow down at least 1ms if 1245 * we are running too fast. 1246 */ 1247 if(rv < min_sleep) 1248 rv = min_sleep; 1249 1250 /* Bound value to fit in 'long' on 32-bit platform. That's 1251 * plenty long enough anyway! 1252 */ 1253 if(rv > 0x7fffffff) 1254 rv = 0x7fffffff; 1255 1256 return (long)rv; 1257} 1258 1259 1260/* 1261 * Transfer() 1262 * 1263 * This function is what performs the actual transfer. It is capable of doing 1264 * both ways simultaneously. The transfer must already have been setup by a 1265 * call to Curl_setup_transfer(). 1266 * 1267 * Note that headers are created in a preallocated buffer of a default size. 1268 * That buffer can be enlarged on demand, but it is never shrunken again. 
 *
 */

static CURLcode
Transfer(struct connectdata *conn)
{
  CURLcode result;
  struct SessionHandle *data = conn->data;
  struct SingleRequest *k = &data->req;
  bool done=FALSE;
  bool first=TRUE;
  long timeout_ms;
  int buffersize;
  long totmp;

  if((conn->sockfd == CURL_SOCKET_BAD) &&
     (conn->writesockfd == CURL_SOCKET_BAD))
    /* nothing to read, nothing to write, we're already OK! */
    return CURLE_OK;

  /* we want header and/or body, if neither then don't do this! */
  if(!k->getheader && data->set.opt_no_body)
    return CURLE_OK;

  /* loop over select()/Curl_readwrite() rounds until the transfer is done */
  while(!done) {
    curl_socket_t fd_read = conn->sockfd;
    curl_socket_t fd_write = conn->writesockfd;
    int keepon = k->keepon;
    timeout_ms = 1000; /* default wait per lap; may be shortened below */

    if(conn->waitfor) {
      /* if waitfor is set, get the RECV and SEND bits from that but keep the
         other bits */
      keepon &= ~ (KEEP_RECV|KEEP_SEND);
      keepon |= conn->waitfor & (KEEP_RECV|KEEP_SEND);
    }

    /* limit-rate logic: if speed exceeds threshold, then do not include fd in
       select set. The current speed is recalculated in each Curl_readwrite()
       call */
    if((keepon & KEEP_SEND) &&
       (!data->set.max_send_speed ||
        (data->progress.ulspeed < data->set.max_send_speed) )) {
      k->keepon &= ~KEEP_SEND_HOLD;
    }
    else {
      if(data->set.upload && data->set.max_send_speed &&
         (data->progress.ulspeed > data->set.max_send_speed) ) {
        /* calculate upload rate-limitation timeout. */
        buffersize = (int)(data->set.buffer_size ?
                           data->set.buffer_size : BUFSIZE);
        totmp = Curl_sleep_time(data->set.max_send_speed,
                                data->progress.ulspeed, buffersize);
        if(totmp < timeout_ms)
          timeout_ms = totmp;
      }
      fd_write = CURL_SOCKET_BAD;
      if(keepon & KEEP_SEND)
        k->keepon |= KEEP_SEND_HOLD; /* hold it */
    }

    if((keepon & KEEP_RECV) &&
       (!data->set.max_recv_speed ||
        (data->progress.dlspeed < data->set.max_recv_speed)) ) {
      k->keepon &= ~KEEP_RECV_HOLD;
    }
    else {
      if((!data->set.upload) && data->set.max_recv_speed &&
         (data->progress.dlspeed > data->set.max_recv_speed)) {
        /* Calculate download rate-limitation timeout. */
        buffersize = (int)(data->set.buffer_size ?
                           data->set.buffer_size : BUFSIZE);
        totmp = Curl_sleep_time(data->set.max_recv_speed,
                                data->progress.dlspeed, buffersize);
        if(totmp < timeout_ms)
          timeout_ms = totmp;
      }
      fd_read = CURL_SOCKET_BAD;
      if(keepon & KEEP_RECV)
        k->keepon |= KEEP_RECV_HOLD; /* hold it */
    }

    /* pause logic. Don't check descriptors for paused connections */
    if(k->keepon & KEEP_RECV_PAUSE)
      fd_read = CURL_SOCKET_BAD;
    if(k->keepon & KEEP_SEND_PAUSE)
      fd_write = CURL_SOCKET_BAD;

    /* The *_HOLD and *_PAUSE logic is necessary since even though there might
       be no traffic during the select interval, we still call
       Curl_readwrite() for the timeout case and if we limit transfer speed we
       must make sure that this function doesn't transfer anything while in
       HOLD status.

       The no timeout for the first round is for the protocols for which data
       has already been slurped off the socket and thus waiting for action
       won't work since it'll wait even though there is already data present
       to work with. */
    if(first &&
       ((fd_read != CURL_SOCKET_BAD) || (fd_write != CURL_SOCKET_BAD)))
      /* if this is the first lap and one of the file descriptors is fine
         to work with, skip the timeout */
      timeout_ms = 0;
    else {
      totmp = Curl_timeleft(data, &k->now, FALSE);
      if(totmp < 0)
        return CURLE_OPERATION_TIMEDOUT;
      else if(!totmp)
        totmp = 1000;

      if(totmp < timeout_ms)
        timeout_ms = totmp;
    }

    switch (Curl_socket_ready(fd_read, fd_write, timeout_ms)) {
    case -1: /* select() error, stop reading */
#ifdef EINTR
      /* The EINTR is not serious, and it seems you might get this more
         often when using the lib in a multi-threaded environment! */
      if(SOCKERRNO == EINTR)
        continue;
#endif
      return CURLE_RECV_ERROR; /* indicate a network problem */
    case 0:  /* timeout */
    default: /* readable descriptors */

      result = Curl_readwrite(conn, &done);
      /* "done" signals to us if the transfer(s) are ready */
      break;
    }
    if(result)
      return result;

    first = FALSE; /* not the first lap anymore */
  }

  return CURLE_OK;
}

/*
 * loadhostpairs() processes the CURLOPT_RESOLVE list: each "host:port:addr"
 * entry is parsed and inserted into the DNS cache so that name resolution
 * for that host/port is short-circuited.
 */
static CURLcode loadhostpairs(struct SessionHandle *data)
{
  struct curl_slist *hostp;
  char hostname[256];
  char address[256];
  int port;

  for(hostp = data->change.resolve; hostp; hostp = hostp->next ) {
    if(!hostp->data)
      continue;
    if(hostp->data[0] == '-') {
      /* TODO: mark an entry for removal */
    }
    else if(3 == sscanf(hostp->data, "%255[^:]:%d:%255s", hostname, &port,
                        address)) {
      struct Curl_dns_entry *dns;
      Curl_addrinfo *addr;

      addr = Curl_str2addr(address, port);
      if(!addr) {
        infof(data, "Resolve %s found illegal!\n", hostp->data);
        continue;
      }
      infof(data, "Added %s:%d:%s to DNS cache\n",
            hostname, port, address);

      if(data->share)
        Curl_share_lock(data, CURL_LOCK_DATA_DNS,
                        CURL_LOCK_ACCESS_SINGLE);

      /* put this host in the cache */
      dns = Curl_cache_addr(data, addr, hostname, port);

      if(data->share)
        Curl_share_unlock(data, CURL_LOCK_DATA_DNS);

      if(!dns) {
        /* not stored in the cache: free the addrinfo here to avoid a leak */
        Curl_freeaddrinfo(addr);
        return CURLE_OUT_OF_MEMORY;
      }
    }
  }
  data->change.resolve = NULL; /* dealt with now */

  return CURLE_OK;
}


/*
 * Curl_pretransfer() is called immediately before a transfer starts.
 *
 * It validates that a URL is set, primes the SSL session cache, resets the
 * per-transfer state (follow counter, auth wishes, error flags), loads any
 * pending cookie files and CURLOPT_RESOLVE entries, and arms the configured
 * timeouts.
 */
CURLcode Curl_pretransfer(struct SessionHandle *data)
{
  CURLcode res;
  if(!data->change.url) {
    /* we can't do anything without URL */
    failf(data, "No URL set!");
    return CURLE_URL_MALFORMAT;
  }

  /* Init the SSL session ID cache here. We do it here since we want to do it
     after the *_setopt() calls (that could change the size of the cache) but
     before any transfer takes place. */
  res = Curl_ssl_initsessions(data, data->set.ssl.numsessions);
  if(res)
    return res;

  data->set.followlocation=0; /* reset the location-follow counter */
  data->state.this_is_a_follow = FALSE; /* reset this */
  data->state.errorbuf = FALSE; /* no error has occurred */
  data->state.httpversion = 0; /* don't assume any particular server version */

  data->state.ssl_connect_retry = FALSE;

  data->state.authproblem = FALSE;
  data->state.authhost.want = data->set.httpauth;
  data->state.authproxy.want = data->set.proxyauth;
  Curl_safefree(data->info.wouldredirect);
  data->info.wouldredirect = NULL;

  /* If there is a list of cookie files to read, do it now! */
  if(data->change.cookielist)
    Curl_cookie_loadfiles(data);

  /* If there is a list of host pairs to deal with */
  if(data->change.resolve)
    res = loadhostpairs(data);

  if(!res) {
    /* Allow data->set.use_port to set which port to use. This needs to be
     * disabled for example when we follow Location: headers to URLs using
     * different ports! */
    data->state.allow_port = TRUE;

#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
    /*************************************************************
     * Tell signal handler to ignore SIGPIPE
     *************************************************************/
    if(!data->set.no_signal)
      data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
#endif

    Curl_initinfo(data); /* reset session-specific information "variables" */
    Curl_pgrsStartNow(data);

    if(data->set.timeout)
      Curl_expire(data, data->set.timeout);

    if(data->set.connecttimeout)
      Curl_expire(data, data->set.connecttimeout);
  }

  return res;
}

/*
 * Curl_posttransfer() is called immediately after a transfer ends
 */
CURLcode Curl_posttransfer(struct SessionHandle *data)
{
#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
  /* restore the signal handler for SIGPIPE before we get back */
  if(!data->set.no_signal)
    signal(SIGPIPE, data->state.prev_signal);
#else
  (void)data; /* unused parameter */
#endif

  return CURLE_OK;
}

#ifndef CURL_DISABLE_HTTP
/*
 * strlen_url() returns the length of the given URL if the spaces within the
 * URL were properly URL encoded.
 */
static size_t strlen_url(const char *url)
{
  const char *ptr;
  size_t newlen=0;
  bool left=TRUE; /* left side of the ?
*/ 1548 1549 for(ptr=url; *ptr; ptr++) { 1550 switch(*ptr) { 1551 case '?': 1552 left=FALSE; 1553 /* fall through */ 1554 default: 1555 newlen++; 1556 break; 1557 case ' ': 1558 if(left) 1559 newlen+=3; 1560 else 1561 newlen++; 1562 break; 1563 } 1564 } 1565 return newlen; 1566} 1567 1568/* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in 1569 * the source URL accordingly. 1570 */ 1571static void strcpy_url(char *output, const char *url) 1572{ 1573 /* we must add this with whitespace-replacing */ 1574 bool left=TRUE; 1575 const char *iptr; 1576 char *optr = output; 1577 for(iptr = url; /* read from here */ 1578 *iptr; /* until zero byte */ 1579 iptr++) { 1580 switch(*iptr) { 1581 case '?': 1582 left=FALSE; 1583 /* fall through */ 1584 default: 1585 *optr++=*iptr; 1586 break; 1587 case ' ': 1588 if(left) { 1589 *optr++='%'; /* add a '%' */ 1590 *optr++='2'; /* add a '2' */ 1591 *optr++='0'; /* add a '0' */ 1592 } 1593 else 1594 *optr++='+'; /* add a '+' here */ 1595 break; 1596 } 1597 } 1598 *optr=0; /* zero terminate output buffer */ 1599 1600} 1601 1602/* 1603 * Returns true if the given URL is absolute (as opposed to relative) 1604 */ 1605static bool is_absolute_url(const char *url) 1606{ 1607 char prot[16]; /* URL protocol string storage */ 1608 char letter; /* used for a silly sscanf */ 1609 1610 return (2 == sscanf(url, "%15[^?&/:]://%c", prot, &letter)) ? TRUE : FALSE; 1611} 1612 1613/* 1614 * Concatenate a relative URL to a base URL making it absolute. 1615 * URL-encodes any spaces. 1616 * The returned pointer must be freed by the caller unless NULL 1617 * (returns NULL on out of memory). 1618 */ 1619static char *concat_url(const char *base, const char *relurl) 1620{ 1621 /*** 1622 TRY to append this new path to the old URL 1623 to the right of the host part. Oh crap, this is doomed to cause 1624 problems in the future... 
   */
  char *newest;
  char *protsep;
  char *pathsep;
  size_t newlen;

  const char *useurl = relurl;
  size_t urllen;

  /* we must make our own copy of the URL to play with, as it may
     point to read-only data */
  char *url_clone=strdup(base);

  if(!url_clone)
    return NULL; /* skip out of this NOW */

  /* protsep points to the start of the host name */
  protsep=strstr(url_clone, "//");
  if(!protsep)
    protsep=url_clone;
  else
    protsep+=2; /* pass the slashes */

  if('/' != relurl[0]) {
    /* relative path: splice it onto the base URL's directory */
    int level=0;

    /* First we need to find out if there's a ?-letter in the URL,
       and cut it and the right-side of that off */
    pathsep = strchr(protsep, '?');
    if(pathsep)
      *pathsep=0;

    /* we have a relative path to append to the last slash if there's one
       available, or if the new URL is just a query string (starts with a
       '?') we append the new one at the end of the entire currently worked
       out URL */
    if(useurl[0] != '?') {
      pathsep = strrchr(protsep, '/');
      if(pathsep)
        *pathsep=0;
    }

    /* Check if there's any slash after the host name, and if so, remember
       that position instead */
    pathsep = strchr(protsep, '/');
    if(pathsep)
      protsep = pathsep+1;
    else
      protsep = NULL;

    /* now deal with one "./" or any amount of "../" in the newurl
       and act accordingly */

    if((useurl[0] == '.') && (useurl[1] == '/'))
      useurl+=2; /* just skip the "./" */

    while((useurl[0] == '.') &&
          (useurl[1] == '.') &&
          (useurl[2] == '/')) {
      level++; /* count one parent directory to strip per "../" */
      useurl+=3; /* pass the "../" */
    }

    if(protsep) {
      while(level--) {
        /* cut off one more level from the right of the original URL */
        pathsep = strrchr(protsep, '/');
        if(pathsep)
          *pathsep=0;
        else {
          *protsep=0;
          break;
        }
      }
    }
  }
  else {
    /* We got a new absolute path for this server */

    if((relurl[0] == '/') && (relurl[1] == '/')) {
      /* the new URL starts with //, just keep the protocol part from the
         original one */
      *protsep=0;
      useurl = &relurl[2]; /* we keep the slashes from the original, so we
                              skip the new ones */
    }
    else {
      /* cut off the original URL from the first slash, or deal with URLs
         without slash */
      pathsep = strchr(protsep, '/');
      if(pathsep) {
        /* When people use badly formatted URLs, such as
           "http://www.url.com?dir=/home/daniel" we must not use the first
           slash, if there's a ?-letter before it! */
        char *sep = strchr(protsep, '?');
        if(sep && (sep < pathsep))
          pathsep = sep;
        *pathsep=0;
      }
      else {
        /* There was no slash. Now, since we might be operating on a badly
           formatted URL, such as "http://www.url.com?id=2380" which doesn't
           use a slash separator as it is supposed to, we need to check for a
           ?-letter as well! */
        pathsep = strchr(protsep, '?');
        if(pathsep)
          *pathsep=0;
      }
    }
  }

  /* If the new part contains a space, this is a mighty stupid redirect
     but we still make an effort to do "right". To the left of a '?'
     letter we replace each space with %20 while it is replaced with '+'
     on the right side of the '?' letter.
  */
  newlen = strlen_url(useurl);

  urllen = strlen(url_clone);

  newest = malloc(urllen + 1 + /* possible slash */
                  newlen + 1 /* zero byte */);

  if(!newest) {
    free(url_clone); /* don't leak this */
    return NULL;
  }

  /* copy over the root url part */
  memcpy(newest, url_clone, urllen);

  /* check if we need to append a slash */
  if(('/' == useurl[0]) || (protsep && !*protsep) || ('?
     == useurl[0]))
    ;
  else
    newest[urllen++]='/';

  /* then append the new piece on the right side */
  strcpy_url(&newest[urllen], useurl);

  free(url_clone);

  return newest;
}
#endif /* CURL_DISABLE_HTTP */

/*
 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
 * as given by the remote server and set up the new URL to request.
 *
 * Ownership of 'newurl' transfers to this function: it is either freed here
 * (after being replaced by an absolute/re-encoded copy) or stored as the
 * next URL to request (or in info.wouldredirect for FOLLOW_FAKE).
 */
CURLcode Curl_follow(struct SessionHandle *data,
                     char *newurl, /* this 'newurl' is the Location: string,
                                      and it must be malloc()ed before passed
                                      here */
                     followtype type) /* see transfer.h */
{
#ifdef CURL_DISABLE_HTTP
  (void)data;
  (void)newurl;
  (void)type;
  /* Location: following will not happen when HTTP is disabled */
  return CURLE_TOO_MANY_REDIRECTS;
#else

  /* Location: redirect */
  bool disallowport = FALSE;

  if(type == FOLLOW_REDIR) {
    if((data->set.maxredirs != -1) &&
       (data->set.followlocation >= data->set.maxredirs)) {
      failf(data,"Maximum (%ld) redirects followed", data->set.maxredirs);
      return CURLE_TOO_MANY_REDIRECTS;
    }

    /* mark the next request as a followed location: */
    data->state.this_is_a_follow = TRUE;

    data->set.followlocation++; /* count location-followers */

    if(data->set.http_auto_referer) {
      /* We are asked to automatically set the previous URL as the referer
         when we get the next URL. We pick the ->url field, which may or may
         not be 100% correct */

      if(data->change.referer_alloc) {
        Curl_safefree(data->change.referer);
        data->change.referer_alloc = FALSE;
      }

      data->change.referer = strdup(data->change.url);
      if(!data->change.referer)
        return CURLE_OUT_OF_MEMORY;
      data->change.referer_alloc = TRUE; /* yes, free this later */
    }
  }

  if(!is_absolute_url(newurl)) {
    /***
     *DANG* this is an RFC 2068 violation. The URL is supposed
     to be absolute and this doesn't seem to be that!
     */
    char *absolute = concat_url(data->change.url, newurl);
    if(!absolute)
      return CURLE_OUT_OF_MEMORY;
    free(newurl);
    newurl = absolute;
  }
  else {
    /* This is an absolute URL, don't allow the custom port number */
    disallowport = TRUE;

    if(strchr(newurl, ' ')) {
      /* This new URL contains at least one space, this is a mighty stupid
         redirect but we still make an effort to do "right". */
      char *newest;
      size_t newlen = strlen_url(newurl);

      newest = malloc(newlen+1); /* get memory for this */
      if(!newest)
        return CURLE_OUT_OF_MEMORY;
      strcpy_url(newest, newurl); /* create a space-free URL */

      free(newurl); /* that was no good */
      newurl = newest; /* use this instead now */
    }

  }

  if(type == FOLLOW_FAKE) {
    /* we're only figuring out the new url if we would've followed locations
       but now we're done so we can get out! */
    data->info.wouldredirect = newurl;
    return CURLE_OK;
  }

  if(disallowport)
    data->state.allow_port = FALSE;

  if(data->change.url_alloc) {
    Curl_safefree(data->change.url);
    data->change.url_alloc = FALSE;
  }

  data->change.url = newurl;
  data->change.url_alloc = TRUE;
  newurl = NULL; /* don't free! */

  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);

  /*
   * We get here when the HTTP code is 300-399 (and 401). We need to perform
   * differently based on exactly what return code there was.
   *
   * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
   * a HTTP (proxy-) authentication scheme other than Basic.
   */
  switch(data->info.httpcode) {
    /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
       Authorization: XXXX header in the HTTP request code snippet */
    /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
       Proxy-Authorization: XXXX header in the HTTP request code snippet */
    /* 300 - Multiple Choices */
    /* 306 - Not used */
    /* 307 - Temporary Redirect */
  default: /* for all above (and the unknown ones) */
    /* Some codes are explicitly mentioned since I've checked RFC2616 and they
     * seem to be OK to POST to.
     */
    break;
  case 301: /* Moved Permanently */
    /* (quote from RFC2616, section 10.3.2):
     *
     * When automatically redirecting a POST request after receiving a 301
     * status code, some existing HTTP/1.0 user agents will erroneously change
     * it into a GET request.
     *
     * ----
     *
     * As most of the important user agents do this obvious RFC2616 violation,
     * many webservers expect this. So these servers often answers to a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behavior can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM)
       && !data->set.post301) {
      infof(data,
            "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;
  case 302: /* Found */
    /* (From 10.3.3)

       Note: RFC 1945 and RFC 2068 specify that the client is not allowed
       to change the method on the redirected request.  However, most
       existing user agent implementations treat 302 as if it were a 303
       response, performing a GET on the Location field-value regardless
       of the original request method. The status codes 303 and 307 have
       been added for servers that wish to make unambiguously clear which
       kind of reaction is expected of the client.

       (From 10.3.4)

       Note: Many pre-HTTP/1.1 user agents do not understand the 303
       status. When interoperability with such clients is a concern, the
       302 status code may be used instead, since most user agents react
       to a 302 response as described here for 303.

       This behavior can be overridden with CURLOPT_POSTREDIR
    */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM)
       && !data->set.post302) {
      infof(data,
            "Violate RFC 2616/10.3.3 and switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;

  case 303: /* See Other */
    /* Disable both types of POSTs, since doing a second POST when
     * following isn't what anyone would want! */
    if(data->set.httpreq != HTTPREQ_GET) {
      data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
      infof(data, "Disables POST, goes with %s\n",
            data->set.opt_no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy. The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }
  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTimes(data);

  return CURLE_OK;
#endif /* CURL_DISABLE_HTTP */
}

/*
 * connect_host() sets up the (primary) connection for the easy interface,
 * waiting synchronously for an asynchronous name resolve to complete. On
 * failure *conn is reset to NULL.
 */
static CURLcode
connect_host(struct SessionHandle *data,
             struct connectdata **conn)
{
  CURLcode res = CURLE_OK;

  bool async;
  bool protocol_done=TRUE; /* will be TRUE always since this is only used
                              within the easy interface */
  Curl_pgrsTime(data, TIMER_STARTSINGLE);
  res = Curl_connect(data, conn, &async, &protocol_done);

  if((CURLE_OK == res) && async) {
    /* Now, if async is TRUE here, we need to wait for the name
       to resolve */
    res = Curl_resolver_wait_resolv(*conn, NULL);
    if(CURLE_OK == res) {
      /* Resolved, continue with the connection */
      res = Curl_async_resolved(*conn, &protocol_done);
      if(res)
        *conn = NULL;
    }
    else {
      /* if we can't resolve, we kill this "connection" now */
      (void)Curl_disconnect(*conn, /* dead_connection */ FALSE);
      *conn = NULL;
    }
  }

  return res;
}

/*
 * Curl_reconnect_request() disconnects a re-used connection that turned out
 * to be dead and establishes a fresh one, updating *connp accordingly.
 */
CURLcode
Curl_reconnect_request(struct connectdata **connp)
{
  CURLcode result = CURLE_OK;
  struct connectdata *conn = *connp;
  struct SessionHandle *data = conn->data;

  /* This was a re-use of a connection and we got a write error in the
   * DO-phase. Then we DISCONNECT this connection and have another attempt to
   * CONNECT and then DO again! The retry cannot possibly find another
   * connection to re-use, since we only keep one possible connection for
   * each.  */

  infof(data, "Re-used connection seems dead, get a new one\n");

  conn->bits.close = TRUE; /* enforce close of this connection */
  result = Curl_done(&conn, result, FALSE); /* we are so done with this */

  /* conn may no longer be a good pointer */

  /*
   * According to bug report #1330310. We need to check for CURLE_SEND_ERROR
   * here as well. I figure this could happen when the request failed on a FTP
   * connection and thus Curl_done() itself tried to use the connection
   * (again). Slight Lack of feedback in the report, but I don't think this
   * extra check can do much harm.
   */
  if((CURLE_OK == result) || (CURLE_SEND_ERROR == result)) {
    bool async;
    bool protocol_done = TRUE;

    /* Now, redo the connect and get a new connection */
    result = Curl_connect(data, connp, &async, &protocol_done);
    if(CURLE_OK == result) {
      /* We have connected or sent away a name resolve query fine */

      conn = *connp; /* setup conn to again point to something nice */
      if(async) {
        /* Now, if async is TRUE here, we need to wait for the name
           to resolve */
        result = Curl_resolver_wait_resolv(conn, NULL);
        if(result)
          return result;

        /* Resolved, continue with the connection */
        result = Curl_async_resolved(conn, &protocol_done);
        if(result)
          return result;
      }
    }
  }

  return result;
}

/* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.

   NOTE: that the *url is malloc()ed.
*/ 2068CURLcode Curl_retry_request(struct connectdata *conn, 2069 char **url) 2070{ 2071 struct SessionHandle *data = conn->data; 2072 2073 *url = NULL; 2074 2075 /* if we're talking upload, we can't do the checks below, unless the protocol 2076 is HTTP as when uploading over HTTP we will still get a response */ 2077 if(data->set.upload && 2078 !(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP))) 2079 return CURLE_OK; 2080 2081 if(/* workaround for broken TLS servers */ data->state.ssl_connect_retry || 2082 ((data->req.bytecount + 2083 data->req.headerbytecount == 0) && 2084 conn->bits.reuse && 2085 !data->set.opt_no_body && 2086 data->set.rtspreq != RTSPREQ_RECEIVE)) { 2087 /* We got no data, we attempted to re-use a connection and yet we want a 2088 "body". This might happen if the connection was left alive when we were 2089 done using it before, but that was closed when we wanted to read from 2090 it again. Bad luck. Retry the same request on a fresh connect! */ 2091 infof(conn->data, "Connection died, retrying a fresh connect\n"); 2092 *url = strdup(conn->data->change.url); 2093 if(!*url) 2094 return CURLE_OUT_OF_MEMORY; 2095 2096 conn->bits.close = TRUE; /* close this connection */ 2097 conn->bits.retry = TRUE; /* mark this as a connection we're about 2098 to retry. Marking it this way should 2099 prevent i.e HTTP transfers to return 2100 error just because nothing has been 2101 transferred! */ 2102 2103 if(data->state.proto.http->writebytecount) 2104 return Curl_readrewind(conn); 2105 } 2106 return CURLE_OK; 2107} 2108 2109static CURLcode Curl_do_perform(struct SessionHandle *data) 2110{ 2111 CURLcode res; 2112 CURLcode res2; 2113 struct connectdata *conn=NULL; 2114 char *newurl = NULL; /* possibly a new URL to follow to! 
*/ 2115 followtype follow = FOLLOW_NONE; 2116 2117 data->state.used_interface = Curl_if_easy; 2118 2119 res = Curl_pretransfer(data); 2120 if(res) 2121 return res; 2122 2123 /* 2124 * It is important that there is NO 'return' from this function at any other 2125 * place than falling down to the end of the function! This is because we 2126 * have cleanup stuff that must be done before we get back, and that is only 2127 * performed after this do-while loop. 2128 */ 2129 2130 for(;;) { 2131 res = connect_host(data, &conn); /* primary connection */ 2132 2133 if(res == CURLE_OK) { 2134 bool do_done; 2135 if(data->set.connect_only) { 2136 /* keep connection open for application to use the socket */ 2137 conn->bits.close = FALSE; 2138 res = Curl_done(&conn, CURLE_OK, FALSE); 2139 break; 2140 } 2141 res = Curl_do(&conn, &do_done); 2142 2143 if(res == CURLE_OK) { 2144 if(conn->data->set.wildcardmatch) { 2145 if(conn->data->wildcard.state == CURLWC_DONE || 2146 conn->data->wildcard.state == CURLWC_SKIP) { 2147 /* keep connection open for application to use the socket */ 2148 conn->bits.close = FALSE; 2149 res = Curl_done(&conn, CURLE_OK, FALSE); 2150 break; 2151 } 2152 } 2153 res = Transfer(conn); /* now fetch that URL please */ 2154 if((res == CURLE_OK) || (res == CURLE_RECV_ERROR)) { 2155 bool retry = FALSE; 2156 CURLcode rc = Curl_retry_request(conn, &newurl); 2157 if(rc) 2158 res = rc; 2159 else 2160 retry = (newurl?TRUE:FALSE); 2161 2162 if(retry) { 2163 /* we know (newurl != NULL) at this point */ 2164 res = CURLE_OK; 2165 follow = FOLLOW_RETRY; 2166 } 2167 else if(res == CURLE_OK) { 2168 /* 2169 * We must duplicate the new URL here as the connection data may 2170 * be free()ed in the Curl_done() function. We prefer the newurl 2171 * one since that's used for redirects or just further requests 2172 * for retries or multi-stage HTTP auth methods etc. 
2173 */ 2174 if(data->req.newurl) { 2175 follow = FOLLOW_REDIR; 2176 newurl = strdup(data->req.newurl); 2177 if(!newurl) 2178 res = CURLE_OUT_OF_MEMORY; 2179 } 2180 else if(data->req.location) { 2181 follow = FOLLOW_FAKE; 2182 newurl = strdup(data->req.location); 2183 if(!newurl) 2184 res = CURLE_OUT_OF_MEMORY; 2185 } 2186 } 2187 2188 /* in the above cases where 'newurl' gets assigned, we have a fresh 2189 * allocated memory pointed to */ 2190 } 2191 if(res != CURLE_OK) { 2192 /* The transfer phase returned error, we mark the connection to get 2193 * closed to prevent being re-used. This is because we can't 2194 * possibly know if the connection is in a good shape or not now. */ 2195 conn->bits.close = TRUE; 2196 2197 if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET]) { 2198 /* if we failed anywhere, we must clean up the secondary socket if 2199 it was used */ 2200 Curl_closesocket(conn, conn->sock[SECONDARYSOCKET]); 2201 conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD; 2202 } 2203 } 2204 2205 /* Always run Curl_done(), even if some of the previous calls 2206 failed, but return the previous (original) error code */ 2207 res2 = Curl_done(&conn, res, FALSE); 2208 2209 if(CURLE_OK == res) 2210 res = res2; 2211 } 2212 else if(conn) 2213 /* Curl_do() failed, clean up left-overs in the done-call, but note 2214 that at some cases the conn pointer is NULL when Curl_do() failed 2215 and the connection cache is very small so only call Curl_done() if 2216 conn is still "alive". */ 2217 /* ignore return code since we already have an error to return */ 2218 (void)Curl_done(&conn, res, FALSE); 2219 2220 /* 2221 * Important: 'conn' cannot be used here, since it may have been closed 2222 * in 'Curl_done' or other functions. 
2223 */ 2224 2225 if((res == CURLE_OK) && follow) { 2226 res = Curl_follow(data, newurl, follow); 2227 if(CURLE_OK == res) { 2228 /* if things went fine, Curl_follow() freed or otherwise took 2229 responsibility for the newurl pointer */ 2230 newurl = NULL; 2231 if(follow >= FOLLOW_RETRY) { 2232 follow = FOLLOW_NONE; 2233 continue; 2234 } 2235 /* else we break out of the loop below */ 2236 } 2237 } 2238 } 2239 break; /* it only reaches here when this shouldn't loop */ 2240 2241 } /* loop if Location: */ 2242 2243 if(newurl) 2244 free(newurl); 2245 2246 if(res && !data->state.errorbuf) { 2247 /* 2248 * As an extra precaution: if no error string has been set and there was 2249 * an error, use the strerror() string or if things are so bad that not 2250 * even that is good, set a bad string that mentions the error code. 2251 */ 2252 const char *str = curl_easy_strerror(res); 2253 if(!str) 2254 failf(data, "unspecified error %d", (int)res); 2255 else 2256 failf(data, "%s", str); 2257 } 2258 2259 /* run post-transfer unconditionally, but don't clobber the return code if 2260 we already have an error code recorder */ 2261 res2 = Curl_posttransfer(data); 2262 if(!res && res2) 2263 res = res2; 2264 2265 return res; 2266} 2267 2268/* 2269 * Curl_perform() is the internal high-level function that gets called by the 2270 * external curl_easy_perform() function. It inits, performs and cleans up a 2271 * single file transfer. 
 */
CURLcode Curl_perform(struct SessionHandle *data)
{
  CURLcode res;

  /* without wildcard matching this is a plain single transfer */
  if(!data->set.wildcardmatch)
    return Curl_do_perform(data);

  /* init main wildcard structures */
  res = Curl_wildcard_init(&data->wildcard);
  if(res)
    return res;

  /* first transfer: this typically downloads the listing that the wildcard
     state machine then iterates over */
  res = Curl_do_perform(data);
  if(res) {
    Curl_wildcard_dtor(&data->wildcard);
    return res;
  }

  /* wildcard loop: one Curl_do_perform() per matched file until the state
     machine reports CURLWC_DONE or an error breaks the loop */
  while(!res && data->wildcard.state != CURLWC_DONE)
    res = Curl_do_perform(data);

  Curl_wildcard_dtor(&data->wildcard);

  /* wildcard download finished or failed; reset state so the handle can be
     re-used for another wildcard transfer */
  data->wildcard.state = CURLWC_INIT;
  return res;
}

/*
 * Curl_setup_transfer() is called to setup some basic properties for the
 * upcoming transfer.
 *
 * Copies the socket indexes, expected size and byte counters into the
 * SingleRequest state and primes the KEEP_RECV/KEEP_SEND bits (including the
 * HTTP 100-continue handshake state) for the transfer loop.
 */
void
Curl_setup_transfer(
  struct connectdata *conn, /* connection data */
  int sockindex,            /* socket index to read from or -1 */
  curl_off_t size,          /* -1 if unknown at this point */
  bool getheader,           /* TRUE if header parsing is wanted */
  curl_off_t *bytecountp,   /* return number of bytes read or NULL */
  int writesockindex,       /* socket index to write to, it may very well be
                               the same we read from. -1 disables */
  curl_off_t *writecountp   /* return number of bytes written or NULL */
  )
{
  struct SessionHandle *data;
  struct SingleRequest *k;

  DEBUGASSERT(conn != NULL);

  data = conn->data;
  k = &data->req;

  /* only FIRSTSOCKET (0), SECONDARYSOCKET (1) or "disabled" (-1) are legal */
  DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));

  /* now copy all input parameters */
  conn->sockfd = sockindex == -1 ?
    CURL_SOCKET_BAD : conn->sock[sockindex];
  conn->writesockfd = writesockindex == -1 ?
    CURL_SOCKET_BAD:conn->sock[writesockindex];
  k->getheader = getheader;

  k->size = size;
  k->bytecountp = bytecountp;
  k->writebytecountp = writecountp;

  /* The code sequence below is placed in this function just because all
     necessary input is not always known in do_complete() as this function may
     be called after that */

  if(!k->getheader) {
    /* no header parsing wanted, so the known size is all body */
    k->header = FALSE;
    if(size > 0)
      Curl_pgrsSetDownloadSize(data, size);
  }
  /* we want header and/or body, if neither then don't do this! */
  if(k->getheader || !data->set.opt_no_body) {

    if(conn->sockfd != CURL_SOCKET_BAD)
      k->keepon |= KEEP_RECV;

    if(conn->writesockfd != CURL_SOCKET_BAD) {
      /* HTTP 1.1 magic:

         Even if we require a 100-return code before uploading data, we might
         need to write data before that since the REQUEST may not have been
         finished sent off just yet.

         Thus, we must check if the request has been sent before we set the
         state info where we wait for the 100-return code
      */
      /* NOTE(review): 'state.proto.http' is only dereferenced here when
         expect100header is set, which presumably implies an HTTP(S) transfer
         — confirm callers never set it for other protocols */
      if((data->state.expect100header) &&
         (data->state.proto.http->sending == HTTPSEND_BODY)) {
        /* wait with write until we either got 100-continue or a timeout */
        k->exp100 = EXP100_AWAITING_CONTINUE;
        k->start100 = k->start;

        /* set a timeout for the multi interface */
        Curl_expire(data, CURL_TIMEOUT_EXPECT_100);
      }
      else {
        if(data->state.expect100header)
          /* when we've sent off the rest of the headers, we must await a
             100-continue but first finish sending the request */
          k->exp100 = EXP100_SENDING_REQUEST;

        /* enable the write bit when we're not waiting for continue */
        k->keepon |= KEEP_SEND;
      }
    } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
  } /* if(k->getheader || !data->set.opt_no_body) */

}