1/***************************************************************************
2 *                                  _   _ ____  _
3 *  Project                     ___| | | |  _ \| |
4 *                             / __| | | | |_) | |
5 *                            | (__| |_| |  _ <| |___
6 *                             \___|\___/|_| \_\_____|
7 *
8 * Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
9 *
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at http://curl.haxx.se/docs/copyright.html.
13 *
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
17 *
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
20 *
21 ***************************************************************************/
22
23#include "setup.h"
24
25/* -- WIN32 approved -- */
26#include <stdio.h>
27#include <string.h>
28#include <stdarg.h>
29#include <stdlib.h>
30#include <ctype.h>
31#include <errno.h>
32
33#include "strtoofft.h"
34#include "strequal.h"
35#include "rawstr.h"
36
37#ifdef WIN32
38#include <time.h>
39#include <io.h>
40#else
41#ifdef HAVE_SYS_SOCKET_H
42#include <sys/socket.h>
43#endif
44#ifdef HAVE_NETINET_IN_H
45#include <netinet/in.h>
46#endif
47#ifdef HAVE_SYS_TIME_H
48#include <sys/time.h>
49#endif
50#ifdef HAVE_UNISTD_H
51#include <unistd.h>
52#endif
53#ifdef HAVE_NETDB_H
54#include <netdb.h>
55#endif
56#ifdef HAVE_ARPA_INET_H
57#include <arpa/inet.h>
58#endif
59#ifdef HAVE_NET_IF_H
60#include <net/if.h>
61#endif
62#ifdef HAVE_SYS_IOCTL_H
63#include <sys/ioctl.h>
64#endif
65#ifdef HAVE_SIGNAL_H
66#include <signal.h>
67#endif
68
69#ifdef HAVE_SYS_PARAM_H
70#include <sys/param.h>
71#endif
72
73#ifdef HAVE_SYS_SELECT_H
74#include <sys/select.h>
75#endif
76
77#ifndef HAVE_SOCKET
78#error "We can't compile without socket() support!"
79#endif
80
81#endif  /* WIN32 */
82
83#include "urldata.h"
84#include <curl/curl.h>
85#include "netrc.h"
86
87#include "content_encoding.h"
88#include "hostip.h"
89#include "transfer.h"
90#include "sendf.h"
91#include "speedcheck.h"
92#include "progress.h"
93#include "http.h"
94#include "url.h"
95#include "getinfo.h"
96#include "sslgen.h"
97#include "http_digest.h"
98#include "http_ntlm.h"
99#include "http_negotiate.h"
100#include "share.h"
101#include "curl_memory.h"
102#include "select.h"
103#include "multiif.h"
104#include "connect.h"
105#include "non-ascii.h"
106
107#define _MPRINTF_REPLACE /* use our functions only */
108#include <curl/mprintf.h>
109
110/* The last #include file should be: */
111#include "memdebug.h"
112
113#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
114
115/*
116 * This function will call the read callback to fill our buffer with data
117 * to upload.
118 */
119CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
120{
121  struct SessionHandle *data = conn->data;
122  size_t buffersize = (size_t)bytes;
123  int nread;
124#ifdef CURL_DOES_CONVERSIONS
125  bool sending_http_headers = FALSE;
126
127  if((conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)) &&
128     (data->state.proto.http->sending == HTTPSEND_REQUEST)) {
129    /* We're sending the HTTP request headers, not the data.
130       Remember that so we don't re-translate them into garbage. */
131    sending_http_headers = TRUE;
132  }
133#endif
134
135  if(data->req.upload_chunky) {
136    /* if chunked Transfer-Encoding */
137    buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
138    data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
139  }
140
141  /* this function returns a size_t, so we typecast to int to prevent warnings
142     with picky compilers */
143  nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
144                                buffersize, conn->fread_in);
145
146  if(nread == CURL_READFUNC_ABORT) {
147    failf(data, "operation aborted by callback");
148    *nreadp = 0;
149    return CURLE_ABORTED_BY_CALLBACK;
150  }
151  else if(nread == CURL_READFUNC_PAUSE) {
152    struct SingleRequest *k = &data->req;
153    /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
154    k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
155    if(data->req.upload_chunky) {
156      /* Back out the preallocation done above */
157      data->req.upload_fromhere -= (8 + 2);
158    }
159    *nreadp = 0;
160    return CURLE_OK; /* nothing was read */
161  }
162  else if((size_t)nread > buffersize) {
163    /* the read function returned a too large value */
164    *nreadp = 0;
165    failf(data, "read function returned funny value");
166    return CURLE_READ_ERROR;
167  }
168
169  if(!data->req.forbidchunk && data->req.upload_chunky) {
170    /* if chunked Transfer-Encoding
171     *    build chunk:
172     *
173     *        <HEX SIZE> CRLF
174     *        <DATA> CRLF
175     */
176    /* On non-ASCII platforms the <DATA> may or may not be
177       translated based on set.prefer_ascii while the protocol
178       portion must always be translated to the network encoding.
179       To further complicate matters, line end conversion might be
180       done later on, so we need to prevent CRLFs from becoming
181       CRCRLFs if that's the case.  To do this we use bare LFs
182       here, knowing they'll become CRLFs later on.
183     */
184
185    char hexbuffer[11];
186    const char *endofline_native;
187    const char *endofline_network;
188    int hexlen;
189#ifdef CURL_DO_LINEEND_CONV
190    if((data->set.crlf) || (data->set.prefer_ascii)) {
191#else
192    if(data->set.crlf) {
193#endif /* CURL_DO_LINEEND_CONV */
194      /* \n will become \r\n later on */
195      endofline_native  = "\n";
196      endofline_network = "\x0a";
197    }
198    else {
199      endofline_native  = "\r\n";
200      endofline_network = "\x0d\x0a";
201    }
202    hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
203                      "%x%s", nread, endofline_native);
204
205    /* move buffer pointer */
206    data->req.upload_fromhere -= hexlen;
207    nread += hexlen;
208
209    /* copy the prefix to the buffer, leaving out the NUL */
210    memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
211
212    /* always append ASCII CRLF to the data */
213    memcpy(data->req.upload_fromhere + nread,
214           endofline_network,
215           strlen(endofline_network));
216
217#ifdef CURL_DOES_CONVERSIONS
218    CURLcode res;
219    int length;
220    if(data->set.prefer_ascii) {
221      /* translate the protocol and data */
222      length = nread;
223    }
224    else {
225      /* just translate the protocol portion */
226      length = strlen(hexbuffer);
227    }
228    res = Curl_convert_to_network(data, data->req.upload_fromhere, length);
229    /* Curl_convert_to_network calls failf if unsuccessful */
230    if(res)
231      return(res);
232#endif /* CURL_DOES_CONVERSIONS */
233
234    if((nread - hexlen) == 0)
235      /* mark this as done once this chunk is transferred */
236      data->req.upload_done = TRUE;
237
238    nread+=(int)strlen(endofline_native); /* for the added end of line */
239  }
240#ifdef CURL_DOES_CONVERSIONS
241  else if((data->set.prefer_ascii) && (!sending_http_headers)) {
242    CURLcode res;
243    res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
244    /* Curl_convert_to_network calls failf if unsuccessful */
245    if(res != CURLE_OK)
246      return(res);
247  }
248#endif /* CURL_DOES_CONVERSIONS */
249
250  *nreadp = nread;
251
252  return CURLE_OK;
253}
254
255
256/*
257 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
258 * POST/PUT with multi-pass authentication when a sending was denied and a
259 * resend is necessary.
260 */
261CURLcode Curl_readrewind(struct connectdata *conn)
262{
263  struct SessionHandle *data = conn->data;
264
265  conn->bits.rewindaftersend = FALSE; /* we rewind now */
266
267  /* explicitly switch off sending data on this connection now since we are
268     about to restart a new transfer and thus we want to avoid inadvertently
269     sending more data on the existing connection until the next transfer
270     starts */
271  data->req.keepon &= ~KEEP_SEND;
272
273  /* We have sent away data. If not using CURLOPT_POSTFIELDS or
274     CURLOPT_HTTPPOST, call app to rewind
275  */
276  if(data->set.postfields ||
277     (data->set.httpreq == HTTPREQ_POST_FORM))
278    ; /* do nothing */
279  else {
280    if(data->set.seek_func) {
281      int err;
282
283      err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
284      if(err) {
285        failf(data, "seek callback returned error %d", (int)err);
286        return CURLE_SEND_FAIL_REWIND;
287      }
288    }
289    else if(data->set.ioctl_func) {
290      curlioerr err;
291
292      err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
293                                   data->set.ioctl_client);
294      infof(data, "the ioctl callback returned %d\n", (int)err);
295
296      if(err) {
297        /* FIXME: convert to a human readable error message */
298        failf(data, "ioctl callback returned error %d", (int)err);
299        return CURLE_SEND_FAIL_REWIND;
300      }
301    }
302    else {
303      /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
304         given FILE * stream and we can actually attempt to rewind that
305         ourself with fseek() */
306      if(data->set.fread_func == (curl_read_callback)fread) {
307        if(-1 != fseek(data->set.in, 0, SEEK_SET))
308          /* successful rewind */
309          return CURLE_OK;
310      }
311
312      /* no callback set or failure above, makes us fail at once */
313      failf(data, "necessary data rewind wasn't possible");
314      return CURLE_SEND_FAIL_REWIND;
315    }
316  }
317  return CURLE_OK;
318}
319
320static int data_pending(const struct connectdata *conn)
321{
322  /* in the case of libssh2, we can never be really sure that we have emptied
323     its internal buffers so we MUST always try until we get EAGAIN back */
324  return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
325    Curl_ssl_data_pending(conn, FIRSTSOCKET);
326}
327
328static void read_rewind(struct connectdata *conn,
329                        size_t thismuch)
330{
331  DEBUGASSERT(conn->read_pos >= thismuch);
332
333  conn->read_pos -= thismuch;
334  conn->bits.stream_was_rewound = TRUE;
335
336#ifdef DEBUGBUILD
337  {
338    char buf[512 + 1];
339    size_t show;
340
341    show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
342    if(conn->master_buffer) {
343      memcpy(buf, conn->master_buffer + conn->read_pos, show);
344      buf[show] = '\0';
345    }
346    else {
347      buf[0] = '\0';
348    }
349
350    DEBUGF(infof(conn->data,
351                 "Buffer after stream rewind (read_pos = %zu): [%s]",
352                 conn->read_pos, buf));
353  }
354#endif
355}
356
357/*
358 * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
359 * remote document with the time provided by CURLOPT_TIMEVAL
360 */
361bool Curl_meets_timecondition(struct SessionHandle *data, time_t timeofdoc)
362{
363  if((timeofdoc == 0) || (data->set.timevalue == 0))
364    return TRUE;
365
366  switch(data->set.timecondition) {
367  case CURL_TIMECOND_IFMODSINCE:
368  default:
369    if(timeofdoc <= data->set.timevalue) {
370      infof(data,
371            "The requested document is not new enough\n");
372      data->info.timecond = TRUE;
373      return FALSE;
374    }
375    break;
376  case CURL_TIMECOND_IFUNMODSINCE:
377    if(timeofdoc >= data->set.timevalue) {
378      infof(data,
379            "The requested document is not old enough\n");
380      data->info.timecond = TRUE;
381      return FALSE;
382    }
383    break;
384  }
385
386  return TRUE;
387}
388
389/*
390 * Go ahead and do a read if we have a readable socket or if
391 * the stream was rewound (in which case we have data in a
392 * buffer)
393 */
static CURLcode readwrite_data(struct SessionHandle *data,
                               struct connectdata *conn,
                               struct SingleRequest *k,
                               int *didwhat, bool *done)
{
  CURLcode result = CURLE_OK;
  ssize_t nread; /* number of bytes read */
  size_t excess = 0; /* excess bytes read */
  bool is_empty_data = FALSE;
  bool readmore = FALSE; /* used by RTP to signal for more data */

  /* *didwhat gets KEEP_RECV OR'ed in once we have attempted a receive;
     *done is set TRUE when this transfer should be considered complete */
  *done = FALSE;

  /* This is where we loop until we have read everything there is to
     read or we get a CURLE_AGAIN */
  do {
    size_t buffersize = data->set.buffer_size?
      data->set.buffer_size : BUFSIZE;
    size_t bytestoread = buffersize;

    if(k->size != -1 && !k->header) {
      /* make sure we don't read "too much" if we can help it since we
         might be pipelining and then someone else might want to read what
         follows! */
      curl_off_t totalleft = k->size - k->bytecount;
      if(totalleft < (curl_off_t)bytestoread)
        bytestoread = (size_t)totalleft;
    }

    if(bytestoread) {
      /* receive data from the network! */
      result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);

      /* read would've blocked */
      if(CURLE_AGAIN == result)
        break; /* get out of loop */

      /* any non-zero result other than CURLE_AGAIN is a real error */
      if(result>0)
        return result;
    }
    else {
      /* read nothing but since we wanted nothing we consider this an OK
         situation to proceed from */
      nread = 0;
    }

    /* nothing counted yet in either direction: this is the first piece of
       the transfer, so start the transfer timer now */
    if((k->bytecount == 0) && (k->writebytecount == 0)) {
      Curl_pgrsTime(data, TIMER_STARTTRANSFER);
      if(k->exp100 > EXP100_SEND_DATA)
        /* set time stamp to compare with when waiting for the 100 */
        k->start100 = Curl_tvnow();
    }

    *didwhat |= KEEP_RECV;
    /* indicates data of zero size, i.e. empty file */
    is_empty_data = (bool)((nread == 0) && (k->bodywrites == 0));

    /* NUL terminate, allowing string ops to be used */
    if(0 < nread || is_empty_data) {
      k->buf[nread] = 0;
    }
    else if(0 >= nread) {
      /* if we receive 0 or less here, the server closed the connection
         and we bail out from this! */
      DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
      k->keepon &= ~KEEP_RECV;
      break;
    }

    /* Default buffer to use when we write the buffer, it may be changed
       in the flow below before the actual storing is done. */
    k->str = k->buf;

    /* give protocol handlers (e.g. RTSP) a first look at the raw data; they
       may consume it and/or ask for more before we continue */
    if(conn->handler->readwrite) {
      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;
      if(readmore)
        break;
    }

#ifndef CURL_DISABLE_HTTP
    /* Since this is a two-state thing, we check if we are parsing
       headers at the moment or not. */
    if(k->header) {
      /* we are in parse-the-header-mode */
      bool stop_reading = FALSE;
      result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
      if(result)
        return result;

      /* the header parser may have flipped us into body mode with leftover
         bytes; let the handler-specific readwrite see them too */
      if(conn->handler->readwrite &&
         (k->maxdownload <= 0 && nread > 0)) {
        result = conn->handler->readwrite(data, conn, &nread, &readmore);
        if(result)
          return result;
        if(readmore)
          break;
      }

      if(stop_reading) {
        /* We've stopped dealing with input, get out of the do-while loop */

        if(nread > 0) {
          /* leftover bytes belong to the next pipelined response; push them
             back when pipelining, otherwise just log the excess */
          if(conn->data->multi && Curl_multi_canPipeline(conn->data->multi)) {
            infof(data,
                  "Rewinding stream by : %zd"
                  " bytes on url %s (zero-length body)\n",
                  nread, data->state.path);
            read_rewind(conn, (size_t)nread);
          }
          else {
            infof(data,
                  "Excess found in a non pipelined read:"
                  " excess = %zd"
                  " url = %s (zero-length body)\n",
                  nread, data->state.path);
          }
        }

        break;
      }
    }
#endif /* CURL_DISABLE_HTTP */


    /* This is not an 'else if' since it may be a rest from the header
       parsing, where the beginning of the buffer is headers and the end
       is non-headers. */
    if(k->str && !k->header && (nread > 0 || is_empty_data)) {


#ifndef CURL_DISABLE_HTTP
      if(0 == k->bodywrites && !is_empty_data) {
        /* These checks are only made the first time we are about to
           write a piece of the body */
        if(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)) {
          /* HTTP-only checks */

          if(data->req.newurl) {
            if(conn->bits.close) {
              /* Abort after the headers if "follow Location" is set
                 and we're set to close anyway. */
              k->keepon &= ~KEEP_RECV;
              *done = TRUE;
              return CURLE_OK;
            }
            /* We have a new url to load, but since we want to be able
               to re-use this connection properly, we read the full
               response in "ignore more" */
            k->ignorebody = TRUE;
            infof(data, "Ignoring the response-body\n");
          }
          if(data->state.resume_from && !k->content_range &&
             (data->set.httpreq==HTTPREQ_GET) &&
             !k->ignorebody) {
            /* we wanted to resume a download, although the server doesn't
             * seem to support this and we did this with a GET (if it
             * wasn't a GET we did a POST or PUT resume) */
            failf(data, "HTTP server doesn't seem to support "
                  "byte ranges. Cannot resume.");
            return CURLE_RANGE_ERROR;
          }

          if(data->set.timecondition && !data->state.range) {
            /* A time condition has been set AND no ranges have been
               requested. This seems to be what chapter 13.3.4 of
               RFC 2616 defines to be the correct action for a
               HTTP/1.1 client */

            if(!Curl_meets_timecondition(data, k->timeofdoc)) {
              *done = TRUE;
              /* we abort the transfer before it is completed == we ruin the
                 re-use ability. Close the connection */
              conn->bits.close = TRUE;
              return CURLE_OK;
            }
          } /* we have a time condition */

        } /* this is HTTP */
      } /* this is the first time we write a body part */
#endif /* CURL_DISABLE_HTTP */
      k->bodywrites++;

      /* pass data to the debug function before it gets "dechunked" */
      if(data->set.verbose) {
        if(k->badheader) {
          /* the header buffer holds data mistaken for a header; show it
             first, then the regular buffer if it holds more body data */
          Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
                     (size_t)k->hbuflen, conn);
          if(k->badheader == HEADER_PARTHEADER)
            Curl_debug(data, CURLINFO_DATA_IN,
                       k->str, (size_t)nread, conn);
        }
        else
          Curl_debug(data, CURLINFO_DATA_IN,
                     k->str, (size_t)nread, conn);
      }

#ifndef CURL_DISABLE_HTTP
      if(k->chunk) {
        /*
         * Here comes a chunked transfer flying and we need to decode this
         * properly.  While the name says read, this function both reads
         * and writes away the data. The returned 'nread' holds the number
         * of actual data it wrote to the client.
         */

        CHUNKcode res =
          Curl_httpchunk_read(conn, k->str, nread, &nread);

        if(CHUNKE_OK < res) {
          if(CHUNKE_WRITE_ERROR == res) {
            failf(data, "Failed writing data");
            return CURLE_WRITE_ERROR;
          }
          failf(data, "Received problem %d in the chunky parser", (int)res);
          return CURLE_RECV_ERROR;
        }
        else if(CHUNKE_STOP == res) {
          size_t dataleft;
          /* we're done reading chunks! */
          k->keepon &= ~KEEP_RECV; /* read no more */

          /* There are now possibly N number of bytes at the end of the
             str buffer that weren't written to the client.

             We DO care about this data if we are pipelining.
             Push it back to be read on the next pass. */

          dataleft = conn->chunk.dataleft;
          if(dataleft != 0) {
            infof(conn->data, "Leftovers after chunking: %zu bytes", dataleft);
            if(conn->data->multi &&
               Curl_multi_canPipeline(conn->data->multi)) {
              /* only attempt the rewind if we truly are pipelining */
              infof(conn->data, "Rewinding %zu bytes\n",dataleft);
              read_rewind(conn, dataleft);
            }
          }
        }
        /* If it returned OK, we just keep going */
      }
#endif   /* CURL_DISABLE_HTTP */

      /* Account for body content stored in the header buffer */
      if(k->badheader && !k->ignorebody) {
        DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
                     k->hbuflen));
        k->bytecount += k->hbuflen;
      }

      /* a known maximum download size exists and this read reaches (or
         passes) it: clamp nread, rewind/log the excess and stop reading */
      if((-1 != k->maxdownload) &&
         (k->bytecount + nread >= k->maxdownload)) {

        excess = (size_t)(k->bytecount + nread - k->maxdownload);
        if(excess > 0 && !k->ignorebody) {
          if(conn->data->multi && Curl_multi_canPipeline(conn->data->multi)) {
            /* The 'excess' amount below can't be more than BUFSIZE which
               always will fit in a size_t */
            infof(data,
                  "Rewinding stream by : %zu"
                  " bytes on url %s (size = %" FORMAT_OFF_T
                  ", maxdownload = %" FORMAT_OFF_T
                  ", bytecount = %" FORMAT_OFF_T ", nread = %zd)\n",
                  excess, data->state.path,
                  k->size, k->maxdownload, k->bytecount, nread);
            read_rewind(conn, excess);
          }
          else {
            infof(data,
                  "Excess found in a non pipelined read:"
                  " excess = %zu"
                  ", size = %" FORMAT_OFF_T
                  ", maxdownload = %" FORMAT_OFF_T
                  ", bytecount = %" FORMAT_OFF_T "\n",
                  excess, k->size, k->maxdownload, k->bytecount);
          }
        }

        nread = (ssize_t) (k->maxdownload - k->bytecount);
        if(nread < 0 ) /* this should be unusual */
          nread = 0;

        k->keepon &= ~KEEP_RECV; /* we're done reading */
      }

      k->bytecount += nread;

      Curl_pgrsSetDownloadCounter(data, k->bytecount);

      if(!k->chunk && (nread || k->badheader || is_empty_data)) {
        /* If this is chunky transfer, it was already written */

        if(k->badheader && !k->ignorebody) {
          /* we parsed a piece of data wrongly assuming it was a header
             and now we output it as body instead */

          /* Don't let excess data pollute body writes */
          if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       k->hbuflen);
          else
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       (size_t)k->maxdownload);

          if(result)
            return result;
        }
        if(k->badheader < HEADER_ALLBAD) {
          /* This switch handles various content encodings. If there's an
             error here, be sure to check over the almost identical code
             in http_chunks.c.
             Make sure that ALL_CONTENT_ENCODINGS contains all the
             encodings handled here. */
#ifdef HAVE_LIBZ
          switch (conn->data->set.http_ce_skip ?
                  IDENTITY : k->auto_decoding) {
          case IDENTITY:
#endif
            /* This is the default when the server sends no
               Content-Encoding header. See Curl_readwrite_init; the
               memset() call initializes k->auto_decoding to zero. */
            if(!k->ignorebody) {

#ifndef CURL_DISABLE_POP3
              if(conn->handler->protocol&CURLPROTO_POP3)
                result = Curl_pop3_write(conn, k->str, nread);
              else
#endif /* CURL_DISABLE_POP3 */

                result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
                                           nread);
            }
#ifdef HAVE_LIBZ
            break;

          case DEFLATE:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_deflate_write(conn, k, nread);
            break;

          case GZIP:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_gzip_write(conn, k, nread);
            break;

          case COMPRESS:
          default:
            failf (data, "Unrecognized content encoding type. "
                   "libcurl understands `identity', `deflate' and `gzip' "
                   "content encodings.");
            result = CURLE_BAD_CONTENT_ENCODING;
            break;
          }
#endif
        }
        k->badheader = HEADER_NORMAL; /* taken care of now */

        if(result)
          return result;
      }

    } /* if(! header and data to read ) */

    /* hand any unrewound excess bytes to the protocol handler (e.g. RTSP
       interleaved data) for parsing */
    if(conn->handler->readwrite &&
       (excess > 0 && !conn->bits.stream_was_rewound)) {
      /* Parse the excess data */
      k->str += nread;
      nread = (ssize_t)excess;

      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;

      if(readmore)
        k->keepon |= KEEP_RECV; /* we're not done reading */
      break;
    }

    if(is_empty_data) {
      /* if we received nothing, the server closed the connection and we
         are done */
      k->keepon &= ~KEEP_RECV;
    }

  } while(data_pending(conn));

  if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
     conn->bits.close ) {
    /* When we've read the entire thing and the close bit is set, the server
       may now close the connection. If there's now any kind of sending going
       on from our side, we need to stop that immediately. */
    infof(data, "we are done reading and this is set to close, stop send\n");
    k->keepon &= ~KEEP_SEND; /* no writing anymore either */
  }

  return CURLE_OK;
}
796
797/*
798 * Send data to upload to the server, when the socket is writable.
799 */
800static CURLcode readwrite_upload(struct SessionHandle *data,
801                                 struct connectdata *conn,
802                                 struct SingleRequest *k,
803                                 int *didwhat)
804{
805  ssize_t i, si;
806  ssize_t bytes_written;
807  CURLcode result;
808  ssize_t nread; /* number of bytes read */
809  bool sending_http_headers = FALSE;
810
811  if((k->bytecount == 0) && (k->writebytecount == 0))
812    Curl_pgrsTime(data, TIMER_STARTTRANSFER);
813
814  *didwhat |= KEEP_SEND;
815
816  /*
817   * We loop here to do the READ and SEND loop until we run out of
818   * data to send or until we get EWOULDBLOCK back
819   */
820  do {
821
822    /* only read more data if there's no upload data already
823       present in the upload buffer */
824    if(0 == data->req.upload_present) {
825      /* init the "upload from here" pointer */
826      data->req.upload_fromhere = k->uploadbuf;
827
828      if(!k->upload_done) {
829        /* HTTP pollution, this should be written nicer to become more
830           protocol agnostic. */
831        int fillcount;
832
833        if((k->exp100 == EXP100_SENDING_REQUEST) &&
834           (data->state.proto.http->sending == HTTPSEND_BODY)) {
835          /* If this call is to send body data, we must take some action:
836             We have sent off the full HTTP 1.1 request, and we shall now
837             go into the Expect: 100 state and await such a header */
838          k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
839          k->keepon &= ~KEEP_SEND;         /* disable writing */
840          k->start100 = Curl_tvnow();       /* timeout count starts now */
841          *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */
842
843          /* set a timeout for the multi interface */
844          Curl_expire(data, CURL_TIMEOUT_EXPECT_100);
845          break;
846        }
847
848        if(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)) {
849          if(data->state.proto.http->sending == HTTPSEND_REQUEST)
850            /* We're sending the HTTP request headers, not the data.
851               Remember that so we don't change the line endings. */
852            sending_http_headers = TRUE;
853          else
854            sending_http_headers = FALSE;
855        }
856
857        result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
858        if(result)
859          return result;
860
861        nread = (ssize_t)fillcount;
862      }
863      else
864        nread = 0; /* we're done uploading/reading */
865
866      if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
867        /* this is a paused transfer */
868        break;
869      }
870      else if(nread<=0) {
871        /* done */
872        k->keepon &= ~KEEP_SEND; /* we're done writing */
873
874        if(conn->bits.rewindaftersend) {
875          result = Curl_readrewind(conn);
876          if(result)
877            return result;
878        }
879        break;
880      }
881
882      /* store number of bytes available for upload */
883      data->req.upload_present = nread;
884
885#ifndef CURL_DISABLE_SMTP
886      if(conn->handler->protocol & CURLPROTO_SMTP) {
887        result = Curl_smtp_escape_eob(conn, nread);
888        if(result)
889          return result;
890      }
891      else
892#endif /* CURL_DISABLE_SMTP */
893
894      /* convert LF to CRLF if so asked */
895      if((!sending_http_headers) &&
896#ifdef CURL_DO_LINEEND_CONV
897        /* always convert if we're FTPing in ASCII mode */
898         ((data->set.crlf) || (data->set.prefer_ascii))) {
899#else
900         (data->set.crlf)) {
901#endif
902        if(data->state.scratch == NULL)
903          data->state.scratch = malloc(2*BUFSIZE);
904        if(data->state.scratch == NULL) {
905          failf (data, "Failed to alloc scratch buffer!");
906          return CURLE_OUT_OF_MEMORY;
907        }
908        /*
909         * ASCII/EBCDIC Note: This is presumably a text (not binary)
910         * transfer so the data should already be in ASCII.
911         * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
912         * must be used instead of the escape sequences \r & \n.
913         */
914        for(i = 0, si = 0; i < nread; i++, si++) {
915          if(data->req.upload_fromhere[i] == 0x0a) {
916            data->state.scratch[si++] = 0x0d;
917            data->state.scratch[si] = 0x0a;
918            if(!data->set.crlf) {
919              /* we're here only because FTP is in ASCII mode...
920                 bump infilesize for the LF we just added */
921              data->set.infilesize++;
922            }
923          }
924          else
925            data->state.scratch[si] = data->req.upload_fromhere[i];
926        }
927        if(si != nread) {
928          /* only perform the special operation if we really did replace
929             anything */
930          nread = si;
931
932          /* upload from the new (replaced) buffer instead */
933          data->req.upload_fromhere = data->state.scratch;
934
935          /* set the new amount too */
936          data->req.upload_present = nread;
937        }
938      }
939    } /* if 0 == data->req.upload_present */
940    else {
941      /* We have a partial buffer left from a previous "round". Use
942         that instead of reading more data */
943    }
944
945    /* write to socket (send away data) */
946    result = Curl_write(conn,
947                        conn->writesockfd,     /* socket to send to */
948                        data->req.upload_fromhere, /* buffer pointer */
949                        data->req.upload_present,  /* buffer size */
950                        &bytes_written);           /* actually sent */
951
952    if(result)
953      return result;
954
955    if(data->set.verbose)
956      /* show the data before we change the pointer upload_fromhere */
957      Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
958                 (size_t)bytes_written, conn);
959
960    k->writebytecount += bytes_written;
961
962    if(k->writebytecount == data->set.infilesize) {
963      /* we have sent all data we were supposed to */
964      k->upload_done = TRUE;
965      infof(data, "We are completely uploaded and fine\n");
966    }
967
968    if(data->req.upload_present != bytes_written) {
969      /* we only wrote a part of the buffer (if anything), deal with it! */
970
971      /* store the amount of bytes left in the buffer to write */
972      data->req.upload_present -= bytes_written;
973
974      /* advance the pointer where to find the buffer when the next send
975         is to happen */
976      data->req.upload_fromhere += bytes_written;
977    }
978    else {
979      /* we've uploaded that buffer now */
980      data->req.upload_fromhere = k->uploadbuf;
981      data->req.upload_present = 0; /* no more bytes left */
982
983      if(k->upload_done) {
984        /* switch off writing, we're done! */
985        k->keepon &= ~KEEP_SEND; /* we're done writing */
986      }
987    }
988
989    Curl_pgrsSetUploadCounter(data, k->writebytecount);
990
991  } while(0); /* just to break out from! */
992
993  return CURLE_OK;
994}
995
996/*
997 * Curl_readwrite() is the low-level function to be called when data is to
998 * be read and written to/from the connection.
999 */
CURLcode Curl_readwrite(struct connectdata *conn,
                        bool *done)
{
  struct SessionHandle *data = conn->data;
  struct SingleRequest *k = &data->req;
  CURLcode result;
  int didwhat=0; /* set non-zero by readwrite_data()/readwrite_upload() when
                    any actual transfer work was performed */

  curl_socket_t fd_read;
  curl_socket_t fd_write;
  int select_res = conn->cselect_bits; /* caller may already know the
                                          read/write/error status */

  conn->cselect_bits = 0; /* consume the bits; they are one-shot */

  /* only use the proper socket if the *_HOLD bit is not set simultaneously as
     then we are in rate limiting state in that transfer direction */

  if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
    fd_read = conn->sockfd;
  else
    fd_read = CURL_SOCKET_BAD;

  if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
    fd_write = conn->writesockfd;
  else
    fd_write = CURL_SOCKET_BAD;

  if(!select_res) /* Call for select()/poll() only, if read/write/error
                     status is not known. */
    select_res = Curl_socket_ready(fd_read, fd_write, 0);

  if(select_res == CURL_CSELECT_ERR) {
    failf(data, "select/poll returned error");
    return CURLE_SEND_ERROR;
  }

  /* We go ahead and do a read if we have a readable socket or if
     the stream was rewound (in which case we have data in a
     buffer) */
  if((k->keepon & KEEP_RECV) &&
     ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {

    result = readwrite_data(data, conn, k, &didwhat, done);
    if(result || *done)
      return result;
  }

  /* If we still have writing to do, we check if we have a writable socket. */
  if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
    /* write */

    result = readwrite_upload(data, conn, k, &didwhat);
    if(result)
      return result;
  }

  k->now = Curl_tvnow();
  if(didwhat) {
    /* Update read/write counters */
    if(k->bytecountp)
      *k->bytecountp = k->bytecount; /* read count */
    if(k->writebytecountp)
      *k->writebytecountp = k->writebytecount; /* write count */
  }
  else {
    /* no read no write, this is a timeout? */
    if(k->exp100 == EXP100_AWAITING_CONTINUE) {
      /* This should allow some time for the header to arrive, but only a
         very short time as otherwise it'll be too much wasted time too
         often. */

      /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":

         Therefore, when a client sends this header field to an origin server
         (possibly via a proxy) from which it has never seen a 100 (Continue)
         status, the client SHOULD NOT wait for an indefinite period before
         sending the request body.

      */

      long ms = Curl_tvdiff(k->now, k->start100);
      if(ms > CURL_TIMEOUT_EXPECT_100) {
        /* we've waited long enough, continue anyway */
        k->exp100 = EXP100_SEND_DATA;
        k->keepon |= KEEP_SEND;
        infof(data, "Done waiting for 100-continue\n");
      }
    }
  }

  /* progress update may abort the transfer via the progress callback */
  if(Curl_pgrsUpdate(conn))
    result = CURLE_ABORTED_BY_CALLBACK;
  else
    result = Curl_speedcheck(data, k->now);
  if(result)
    return result;

  if(k->keepon) {
    /* transfer not finished yet: enforce the overall operation timeout */
    if(0 > Curl_timeleft(data, &k->now, FALSE)) {
      if(k->size != -1) {
        failf(data, "Operation timed out after %ld milliseconds with %"
              FORMAT_OFF_T " out of %" FORMAT_OFF_T " bytes received",
              Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
              k->size);
      }
      else {
        failf(data, "Operation timed out after %ld milliseconds with %"
              FORMAT_OFF_T " bytes received",
              Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
      }
      return CURLE_OPERATION_TIMEDOUT;
    }
  }
  else {
    /*
     * The transfer has been performed. Just make some general checks before
     * returning.
     */

    if(!(data->set.opt_no_body) && (k->size != -1) &&
       (k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
       /* Most FTP servers don't adjust their file SIZE response for CRLFs,
          so we'll check to see if the discrepancy can be explained
          by the number of CRLFs we've changed to LFs.
       */
       (k->bytecount != (k->size + data->state.crlf_conversions)) &&
#endif /* CURL_DO_LINEEND_CONV */
       !data->req.newurl) {
      failf(data, "transfer closed with %" FORMAT_OFF_T
            " bytes remaining to read",
            k->size - k->bytecount);
      return CURLE_PARTIAL_FILE;
    }
    else if(!(data->set.opt_no_body) &&
            k->chunk &&
            (conn->chunk.state != CHUNK_STOP)) {
      /*
       * In chunked mode, return an error if the connection is closed prior to
       * the empty (terminating) chunk is read.
       *
       * The condition above used to check for
       * conn->proto.http->chunk.datasize != 0 which is true after reading
       * *any* chunk, not just the empty chunk.
       *
       */
      failf(data, "transfer closed with outstanding read data remaining");
      return CURLE_PARTIAL_FILE;
    }
    if(Curl_pgrsUpdate(conn))
      return CURLE_ABORTED_BY_CALLBACK;
  }

  /* Now update the "done" boolean we return */
  *done = (bool)(0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
                                  KEEP_RECV_PAUSE|KEEP_SEND_PAUSE)));

  return CURLE_OK;
}
1159
1160/*
1161 * Curl_single_getsock() gets called by the multi interface code when the app
1162 * has requested to get the sockets for the current connection. This function
1163 * will then be called once for every connection that the multi interface
1164 * keeps track of. This function will only be called for connections that are
1165 * in the proper state to have this information available.
1166 */
int Curl_single_getsock(const struct connectdata *conn,
                        curl_socket_t *sock, /* points to numsocks number
                                                of sockets */
                        int numsocks)
{
  const struct SessionHandle *data = conn->data;
  int bitmap = GETSOCK_BLANK;
  unsigned sockindex = 0;

  /* a protocol handler may provide its own socket set */
  if(conn->handler->perform_getsock)
    return conn->handler->perform_getsock(conn, sock, numsocks);

  if(numsocks < 2)
    /* simple check but we might need two slots */
    return GETSOCK_BLANK;

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {

    DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);

    /* slot 0 holds the readable socket */
    bitmap |= GETSOCK_READSOCK(sockindex);
    sock[sockindex] = conn->sockfd;
  }

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {

    /* when read and write use the same socket and a read slot was already
       filled above, the write bit reuses slot 0; otherwise a separate slot
       is used */
    if((conn->sockfd != conn->writesockfd) ||
       !(data->req.keepon & KEEP_RECV)) {
      /* only if they are not the same socket or we didn't have a readable
         one, we increase index */
      if(data->req.keepon & KEEP_RECV)
        sockindex++; /* increase index if we need two entries */

      DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);

      sock[sockindex] = conn->writesockfd;
    }

    bitmap |= GETSOCK_WRITESOCK(sockindex);
  }

  return bitmap;
}
1212
1213/*
1214 * Determine optimum sleep time based on configured rate, current rate,
1215 * and packet size.
1216 * Returns value in mili-seconds.
1217 *
1218 * The basic idea is to adjust the desired rate up/down in this method
1219 * based on whether we are running too slow or too fast.  Then, calculate
1220 * how many miliseconds to wait for the next packet to achieve this new
1221 * rate.
1222 */
1223long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps,
1224                             int pkt_size)
1225{
1226  curl_off_t min_sleep = 0;
1227  curl_off_t rv = 0;
1228
1229  if(rate_bps == 0)
1230    return 0;
1231
1232  /* If running faster than about .1% of the desired speed, slow
1233   * us down a bit.  Use shift instead of division as the 0.1%
1234   * cutoff is arbitrary anyway.
1235   */
1236  if(cur_rate_bps > (rate_bps + (rate_bps >> 10))) {
1237    /* running too fast, decrease target rate by 1/64th of rate */
1238    rate_bps -= rate_bps >> 6;
1239    min_sleep = 1;
1240  }
1241  else if(cur_rate_bps < (rate_bps - (rate_bps >> 10))) {
1242    /* running too slow, increase target rate by 1/64th of rate */
1243    rate_bps += rate_bps >> 6;
1244  }
1245
1246  /* Determine number of miliseconds to wait until we do
1247   * the next packet at the adjusted rate.  We should wait
1248   * longer when using larger packets, for instance.
1249   */
1250  rv = ((curl_off_t)((pkt_size * 8) * 1000) / rate_bps);
1251
1252  /* Catch rounding errors and always slow down at least 1ms if
1253   * we are running too fast.
1254   */
1255  if(rv < min_sleep)
1256    rv = min_sleep;
1257
1258  /* Bound value to fit in 'long' on 32-bit platform.  That's
1259   * plenty long enough anyway!
1260   */
1261  if(rv > 0x7fffffff)
1262    rv = 0x7fffffff;
1263
1264  return (long)rv;
1265}
1266
1267
1268/*
1269 * Transfer()
1270 *
1271 * This function is what performs the actual transfer. It is capable of doing
1272 * both ways simultaneously.  The transfer must already have been setup by a
1273 * call to Curl_setup_transfer().
1274 *
 * Note that headers are created in a preallocated buffer of a default size.
 * That buffer can be enlarged on demand, but it is never shrunk again.
 *
1278 */
1279
static CURLcode
Transfer(struct connectdata *conn)
{
  CURLcode result;
  struct SessionHandle *data = conn->data;
  struct SingleRequest *k = &data->req;
  bool done=FALSE;
  bool first=TRUE; /* first lap skips the select timeout, see below */
  long timeout_ms;
  int buffersize;
  long totmp;

  if((conn->sockfd == CURL_SOCKET_BAD) &&
     (conn->writesockfd == CURL_SOCKET_BAD))
    /* nothing to read, nothing to write, we're already OK! */
    return CURLE_OK;

  /* we want header and/or body, if neither then don't do this! */
  if(!k->getheader && data->set.opt_no_body)
    return CURLE_OK;

  while(!done) {
    curl_socket_t fd_read = conn->sockfd;
    curl_socket_t fd_write = conn->writesockfd;
    int keepon = k->keepon; /* local working copy of the direction bits */
    timeout_ms = 1000;      /* default wait per lap */

    if(conn->waitfor) {
      /* if waitfor is set, get the RECV and SEND bits from that but keep the
         other bits */
      keepon &= ~ (KEEP_RECV|KEEP_SEND);
      keepon |= conn->waitfor & (KEEP_RECV|KEEP_SEND);
    }

    /* limit-rate logic: if speed exceeds threshold, then do not include fd in
       select set. The current speed is recalculated in each Curl_readwrite()
       call */
    if((keepon & KEEP_SEND) &&
        (!data->set.max_send_speed ||
         (data->progress.ulspeed < data->set.max_send_speed) )) {
      /* under the send limit (or no limit): allow sending again */
      k->keepon &= ~KEEP_SEND_HOLD;
    }
    else {
      if(data->set.upload && data->set.max_send_speed &&
         (data->progress.ulspeed > data->set.max_send_speed) ) {
        /* calculate upload rate-limitation timeout. */
        buffersize = (int)(data->set.buffer_size ?
                           data->set.buffer_size : BUFSIZE);
        totmp = Curl_sleep_time(data->set.max_send_speed,
                                data->progress.ulspeed, buffersize);
        if(totmp < timeout_ms)
          timeout_ms = totmp;
      }
      fd_write = CURL_SOCKET_BAD; /* leave the write fd out of the select */
      if(keepon & KEEP_SEND)
        k->keepon |= KEEP_SEND_HOLD; /* hold it */
    }

    if((keepon & KEEP_RECV) &&
        (!data->set.max_recv_speed ||
         (data->progress.dlspeed < data->set.max_recv_speed)) ) {
      /* under the receive limit (or no limit): allow receiving again */
      k->keepon &= ~KEEP_RECV_HOLD;
    }
    else {
      if((!data->set.upload) && data->set.max_recv_speed &&
         (data->progress.dlspeed > data->set.max_recv_speed)) {
        /* Calculate download rate-limitation timeout. */
        buffersize = (int)(data->set.buffer_size ?
                           data->set.buffer_size : BUFSIZE);
        totmp = Curl_sleep_time(data->set.max_recv_speed,
                                data->progress.dlspeed, buffersize);
        if(totmp < timeout_ms)
          timeout_ms = totmp;
      }
      fd_read = CURL_SOCKET_BAD; /* leave the read fd out of the select */
      if(keepon & KEEP_RECV)
        k->keepon |= KEEP_RECV_HOLD; /* hold it */
    }

    /* pause logic. Don't check descriptors for paused connections */
    if(k->keepon & KEEP_RECV_PAUSE)
      fd_read = CURL_SOCKET_BAD;
    if(k->keepon & KEEP_SEND_PAUSE)
      fd_write = CURL_SOCKET_BAD;

    /* The *_HOLD and *_PAUSE logic is necessary since even though there might
       be no traffic during the select interval, we still call
       Curl_readwrite() for the timeout case and if we limit transfer speed we
       must make sure that this function doesn't transfer anything while in
       HOLD status.

       The no timeout for the first round is for the protocols for which data
       has already been slurped off the socket and thus waiting for action
       won't work since it'll wait even though there is already data present
       to work with. */
    if(first &&
       ((fd_read != CURL_SOCKET_BAD) || (fd_write != CURL_SOCKET_BAD)))
      /* if this is the first lap and one of the file descriptors is fine
         to work with, skip the timeout */
      timeout_ms = 0;
    else {
      totmp = Curl_timeleft(data, &k->now, FALSE);
      if(totmp < 0)
        return CURLE_OPERATION_TIMEDOUT;
      else if(!totmp)
        totmp = 1000; /* no specific per-operation deadline: wait 1s */

      if(totmp < timeout_ms)
        timeout_ms = totmp;
    }

    switch (Curl_socket_ready(fd_read, fd_write, timeout_ms)) {
    case -1: /* select() error, stop reading */
#ifdef EINTR
      /* The EINTR is not serious, and it seems you might get this more
         often when using the lib in a multi-threaded environment! */
      if(SOCKERRNO == EINTR)
        continue; /* 'result' is untouched here; the loop restarts */
#endif
      return CURLE_RECV_ERROR;  /* indicate a network problem */
    case 0:  /* timeout */
    default: /* readable descriptors */

      result = Curl_readwrite(conn, &done);
      /* "done" signals to us if the transfer(s) are ready */
      break;
    }
    /* 'result' is always assigned before we get here (the only other exits
       from the switch are 'continue' and 'return') */
    if(result)
      return result;

    first = FALSE; /* not the first lap anymore */
  }

  return CURLE_OK;
}
1415
/* Walk the list of HOST:PORT:ADDRESS strings (set via the 'resolve' change
   list) and pre-populate the DNS cache with them, so those host names
   resolve to the given addresses without an actual lookup.
   Returns CURLE_OUT_OF_MEMORY if caching an entry fails, CURLE_OK
   otherwise. */
static CURLcode loadhostpairs(struct SessionHandle *data)
{
  struct curl_slist *hostp;
  char hostname[256];
  char address[256];
  int port;

  for(hostp = data->change.resolve; hostp; hostp = hostp->next ) {
    if(!hostp->data)
      continue; /* skip empty list nodes */
    if(hostp->data[0] == '-') {
      /* a leading dash requests removal of an entry; not implemented yet */
      /* TODO: mark an entry for removal */
    }
    else if(3 == sscanf(hostp->data, "%255[^:]:%d:%255s", hostname, &port,
                        address)) {
      /* entries that don't parse as HOST:PORT:ADDRESS are silently
         ignored; note that %255s stops the address at whitespace */
      struct Curl_dns_entry *dns;
      Curl_addrinfo *addr;

      addr = Curl_str2addr(address, port);
      if(!addr) {
        infof(data, "Resolve %s found illegal!\n", hostp->data);
        continue;
      }
      infof(data, "Added %s:%d:%s to DNS cache\n",
            hostname, port, address);

      /* the DNS cache may be shared between handles; lock around the
         insertion */
      if(data->share)
        Curl_share_lock(data, CURL_LOCK_DATA_DNS, CURL_LOCK_ACCESS_SINGLE);

      /* put this host in the cache */
      dns = Curl_cache_addr(data, addr, hostname, port);

      if(data->share)
        Curl_share_unlock(data, CURL_LOCK_DATA_DNS);

      if(!dns)
        return CURLE_OUT_OF_MEMORY;
    }
  }
  data->change.resolve = NULL; /* dealt with now */

  return CURLE_OK;
}
1459
1460
1461/*
1462 * Curl_pretransfer() is called immediately before a transfer starts.
1463 */
CURLcode Curl_pretransfer(struct SessionHandle *data)
{
  CURLcode res;
  if(!data->change.url) {
    /* we can't do anything without URL */
    failf(data, "No URL set!");
    return CURLE_URL_MALFORMAT;
  }

  /* Init the SSL session ID cache here. We do it here since we want to do it
     after the *_setopt() calls (that could change the size of the cache) but
     before any transfer takes place. */
  res = Curl_ssl_initsessions(data, data->set.ssl.numsessions);
  if(res)
    return res;

  data->set.followlocation=0; /* reset the location-follow counter */
  data->state.this_is_a_follow = FALSE; /* reset this */
  data->state.errorbuf = FALSE; /* no error has occurred */
  data->state.httpversion = 0; /* don't assume any particular server version */

  data->state.ssl_connect_retry = FALSE;

  /* reset authentication state for this transfer */
  data->state.authproblem = FALSE;
  data->state.authhost.want = data->set.httpauth;
  data->state.authproxy.want = data->set.proxyauth;
  Curl_safefree(data->info.wouldredirect);
  data->info.wouldredirect = NULL;

  /* If there is a list of cookie files to read, do it now! */
  if(data->change.cookielist)
    Curl_cookie_loadfiles(data);

  /* If there is a list of host pairs to deal with */
  if(data->change.resolve)
    res = loadhostpairs(data);

  /* the remaining setup is skipped if loadhostpairs() failed; its error is
     then what this function returns */
  if(!res) {
    /* Allow data->set.use_port to set which port to use. This needs to be
     * disabled for example when we follow Location: headers to URLs using
     * different ports! */
    data->state.allow_port = TRUE;

#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
    /*************************************************************
     * Tell signal handler to ignore SIGPIPE
     *************************************************************/
    if(!data->set.no_signal)
      data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
#endif

    Curl_initinfo(data); /* reset session-specific information "variables" */
    Curl_pgrsStartNow(data);

    /* register expiry times so the multi interface wakes up in time */
    if(data->set.timeout)
      Curl_expire(data, data->set.timeout);

    if(data->set.connecttimeout)
      Curl_expire(data, data->set.connecttimeout);
  }

  return res;
}
1527
1528/*
1529 * Curl_posttransfer() is called immediately after a transfer ends
1530 */
CURLcode Curl_posttransfer(struct SessionHandle *data)
{
#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
  /* restore the signal handler for SIGPIPE before we get back; it was
     replaced with SIG_IGN in Curl_pretransfer() */
  if(!data->set.no_signal)
    signal(SIGPIPE, data->state.prev_signal);
#else
  (void)data; /* unused parameter */
#endif

  return CURLE_OK;
}
1543
1544#ifndef CURL_DISABLE_HTTP
1545/*
1546 * strlen_url() returns the length of the given URL if the spaces within the
1547 * URL were properly URL encoded.
1548 */
1549static size_t strlen_url(const char *url)
1550{
1551  const char *ptr;
1552  size_t newlen=0;
1553  bool left=TRUE; /* left side of the ? */
1554
1555  for(ptr=url; *ptr; ptr++) {
1556    switch(*ptr) {
1557    case '?':
1558      left=FALSE;
1559      /* fall through */
1560    default:
1561      newlen++;
1562      break;
1563    case ' ':
1564      if(left)
1565        newlen+=3;
1566      else
1567        newlen++;
1568      break;
1569    }
1570  }
1571  return newlen;
1572}
1573
1574/* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1575 * the source URL accordingly.
1576 */
/* strcpy_url() copies 'url' into 'output' while URL-encoding the spaces:
 * "%20" left of the first '?', '+' right of it. 'output' must be large
 * enough (see strlen_url()) and is always zero-terminated.
 */
static void strcpy_url(char *output, const char *url)
{
  char *dst = output;
  const char *src;
  int in_query = 0; /* have we passed the first '?' yet? */

  for(src = url; *src; src++) {
    if(*src == ' ') {
      if(in_query)
        *dst++ = '+';
      else {
        /* percent-encode the space */
        *dst++ = '%';
        *dst++ = '2';
        *dst++ = '0';
      }
    }
    else {
      if(*src == '?')
        in_query = 1;
      *dst++ = *src;
    }
  }
  *dst = 0; /* zero terminate output buffer */
}
1607
1608/*
1609 * Returns true if the given URL is absolute (as opposed to relative)
1610 */
/*
 * Returns true if the given URL is absolute (as opposed to relative), i.e.
 * it looks like "<scheme>://<something>" where the scheme contains none of
 * '?', '&', '/' or ':'.
 */
static bool is_absolute_url(const char *url)
{
  char scheme[16]; /* storage for the protocol name */
  char follower;   /* any one character after the "://" */

  /* both conversions must succeed for the URL to count as absolute */
  return (bool)(2 == sscanf(url, "%15[^?&/:]://%c", scheme, &follower));
}
1618
1619/*
1620 * Concatenate a relative URL to a base URL making it absolute.
1621 * URL-encodes any spaces.
1622 * The returned pointer must be freed by the caller unless NULL
1623 * (returns NULL on out of memory).
1624 */
static char *concat_url(const char *base, const char *relurl)
{
  /***
   TRY to append this new path to the old URL
   to the right of the host part. Oh crap, this is doomed to cause
   problems in the future...
  */
  char *newest;
  char *protsep; /* points into url_clone, just past "scheme://" */
  char *pathsep; /* scratch pointer used to cut url_clone at separators */
  size_t newlen;

  const char *useurl = relurl; /* advanced past leading "./" and "../" */
  size_t urllen;

  /* we must make our own copy of the URL to play with, as it may
     point to read-only data */
  char *url_clone=strdup(base);

  if(!url_clone)
    return NULL; /* skip out of this NOW */

  /* protsep points to the start of the host name */
  protsep=strstr(url_clone, "//");
  if(!protsep)
    protsep=url_clone;
  else
    protsep+=2; /* pass the slashes */

  if('/' != relurl[0]) {
    /* relative path (or bare query string): splice onto the base's
       directory part */
    int level=0; /* number of "../" levels to strip from the base path */

    /* First we need to find out if there's a ?-letter in the URL,
       and cut it and the right-side of that off */
    pathsep = strchr(protsep, '?');
    if(pathsep)
      *pathsep=0;

    /* we have a relative path to append to the last slash if there's one
       available, or if the new URL is just a query string (starts with a
       '?')  we append the new one at the end of the entire currently worked
       out URL */
    if(useurl[0] != '?') {
      pathsep = strrchr(protsep, '/');
      if(pathsep)
        *pathsep=0;
    }

    /* Check if there's any slash after the host name, and if so, remember
       that position instead */
    pathsep = strchr(protsep, '/');
    if(pathsep)
      protsep = pathsep+1;
    else
      protsep = NULL; /* base has no path part at all */

    /* now deal with one "./" or any amount of "../" in the newurl
       and act accordingly */

    if((useurl[0] == '.') && (useurl[1] == '/'))
      useurl+=2; /* just skip the "./" */

    while((useurl[0] == '.') &&
          (useurl[1] == '.') &&
          (useurl[2] == '/')) {
      level++;
      useurl+=3; /* pass the "../" */
    }

    if(protsep) {
      while(level--) {
        /* cut off one more level from the right of the original URL */
        pathsep = strrchr(protsep, '/');
        if(pathsep)
          *pathsep=0;
        else {
          /* more "../" than path levels: cut the whole path off */
          *protsep=0;
          break;
        }
      }
    }
  }
  else {
    /* We got a new absolute path for this server, cut off from the
       first slash */
    pathsep = strchr(protsep, '/');
    if(pathsep) {
      /* When people use badly formatted URLs, such as
         "http://www.url.com?dir=/home/daniel" we must not use the first
         slash, if there's a ?-letter before it! */
      char *sep = strchr(protsep, '?');
      if(sep && (sep < pathsep))
        pathsep = sep;
      *pathsep=0;
    }
    else {
      /* There was no slash. Now, since we might be operating on a badly
         formatted URL, such as "http://www.url.com?id=2380" which doesn't
         use a slash separator as it is supposed to, we need to check for a
         ?-letter as well! */
      pathsep = strchr(protsep, '?');
      if(pathsep)
        *pathsep=0;
    }
  }

  /* If the new part contains a space, this is a mighty stupid redirect
     but we still make an effort to do "right". To the left of a '?'
     letter we replace each space with %20 while it is replaced with '+'
     on the right side of the '?' letter.
  */
  newlen = strlen_url(useurl);

  urllen = strlen(url_clone);

  newest = malloc( urllen + 1 + /* possible slash */
                         newlen + 1 /* zero byte */);

  if(!newest) {
    free(url_clone); /* don't leak this */
    return NULL;
  }

  /* copy over the root url part */
  memcpy(newest, url_clone, urllen);

  /* check if we need to append a slash */
  if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
    ;
  else
    newest[urllen++]='/';

  /* then append the new piece on the right side */
  strcpy_url(&newest[urllen], useurl);

  free(url_clone);

  return newest;
}
1764#endif /* CURL_DISABLE_HTTP */
1765
1766/*
1767 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1768 * as given by the remote server and set up the new URL to request.
1769 */
1770CURLcode Curl_follow(struct SessionHandle *data,
1771                     char *newurl, /* this 'newurl' is the Location: string,
1772                                      and it must be malloc()ed before passed
1773                                      here */
1774                     followtype type) /* see transfer.h */
1775{
1776#ifdef CURL_DISABLE_HTTP
1777  (void)data;
1778  (void)newurl;
1779  (void)type;
1780  /* Location: following will not happen when HTTP is disabled */
1781  return CURLE_TOO_MANY_REDIRECTS;
1782#else
1783
1784  /* Location: redirect */
1785  bool disallowport = FALSE;
1786
1787  if(type == FOLLOW_REDIR) {
1788    if((data->set.maxredirs != -1) &&
1789        (data->set.followlocation >= data->set.maxredirs)) {
1790      failf(data,"Maximum (%ld) redirects followed", data->set.maxredirs);
1791      return CURLE_TOO_MANY_REDIRECTS;
1792    }
1793
1794    /* mark the next request as a followed location: */
1795    data->state.this_is_a_follow = TRUE;
1796
1797    data->set.followlocation++; /* count location-followers */
1798
1799    if(data->set.http_auto_referer) {
1800      /* We are asked to automatically set the previous URL as the referer
1801         when we get the next URL. We pick the ->url field, which may or may
1802         not be 100% correct */
1803
1804      if(data->change.referer_alloc)
1805        /* If we already have an allocated referer, free this first */
1806        free(data->change.referer);
1807
1808      data->change.referer = strdup(data->change.url);
1809      if(!data->change.referer) {
1810        data->change.referer_alloc = FALSE;
1811        return CURLE_OUT_OF_MEMORY;
1812      }
1813      data->change.referer_alloc = TRUE; /* yes, free this later */
1814    }
1815  }
1816
1817  if(!is_absolute_url(newurl))  {
1818    /***
1819     *DANG* this is an RFC 2068 violation. The URL is supposed
1820     to be absolute and this doesn't seem to be that!
1821     */
1822    char *absolute = concat_url(data->change.url, newurl);
1823    if(!absolute)
1824      return CURLE_OUT_OF_MEMORY;
1825    free(newurl);
1826    newurl = absolute;
1827  }
1828  else {
1829    /* This is an absolute URL, don't allow the custom port number */
1830    disallowport = TRUE;
1831
1832    if(strchr(newurl, ' ')) {
1833      /* This new URL contains at least one space, this is a mighty stupid
1834         redirect but we still make an effort to do "right". */
1835      char *newest;
1836      size_t newlen = strlen_url(newurl);
1837
1838      newest = malloc(newlen+1); /* get memory for this */
1839      if(!newest)
1840        return CURLE_OUT_OF_MEMORY;
1841      strcpy_url(newest, newurl); /* create a space-free URL */
1842
1843      free(newurl); /* that was no good */
1844      newurl = newest; /* use this instead now */
1845    }
1846
1847  }
1848
1849  if(type == FOLLOW_FAKE) {
1850    /* we're only figuring out the new url if we would've followed locations
1851       but now we're done so we can get out! */
1852    data->info.wouldredirect = newurl;
1853    return CURLE_OK;
1854  }
1855
1856  if(disallowport)
1857    data->state.allow_port = FALSE;
1858
1859  if(data->change.url_alloc)
1860    free(data->change.url);
1861  else
1862    data->change.url_alloc = TRUE; /* the URL is allocated */
1863
1864  data->change.url = newurl;
1865  newurl = NULL; /* don't free! */
1866
1867  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1868
1869  /*
1870   * We get here when the HTTP code is 300-399 (and 401). We need to perform
1871   * differently based on exactly what return code there was.
1872   *
1873   * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1874   * a HTTP (proxy-) authentication scheme other than Basic.
1875   */
1876  switch(data->info.httpcode) {
1877    /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1878       Authorization: XXXX header in the HTTP request code snippet */
1879    /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1880       Proxy-Authorization: XXXX header in the HTTP request code snippet */
1881    /* 300 - Multiple Choices */
1882    /* 306 - Not used */
1883    /* 307 - Temporary Redirect */
1884  default:  /* for all above (and the unknown ones) */
1885    /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1886     * seem to be OK to POST to.
1887     */
1888    break;
1889  case 301: /* Moved Permanently */
1890    /* (quote from RFC2616, section 10.3.2):
1891     *
1892     * Note: When automatically redirecting a POST request after receiving a
1893     * 301 status code, some existing HTTP/1.0 user agents will erroneously
1894     * change it into a GET request.
1895     *
1896     * ----
1897     *
1898     * Warning: Because most of importants user agents do this obvious RFC2616
1899     * violation, many webservers expect this misbehavior. So these servers
1900     * often answers to a POST request with an error page.  To be sure that
1901     * libcurl gets the page that most user agents would get, libcurl has to
1902     * force GET.
1903     *
1904     * This behaviour can be overridden with CURLOPT_POSTREDIR.
1905     */
1906    if((data->set.httpreq == HTTPREQ_POST
1907        || data->set.httpreq == HTTPREQ_POST_FORM)
1908       && !data->set.post301) {
1909      infof(data,
1910            "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
1911      data->set.httpreq = HTTPREQ_GET;
1912    }
1913    break;
1914  case 302: /* Found */
1915    /* (From 10.3.3)
1916
1917    Note: RFC 1945 and RFC 2068 specify that the client is not allowed
1918    to change the method on the redirected request.  However, most
1919    existing user agent implementations treat 302 as if it were a 303
1920    response, performing a GET on the Location field-value regardless
1921    of the original request method. The status codes 303 and 307 have
1922    been added for servers that wish to make unambiguously clear which
1923    kind of reaction is expected of the client.
1924
1925    (From 10.3.4)
1926
1927    Note: Many pre-HTTP/1.1 user agents do not understand the 303
1928    status. When interoperability with such clients is a concern, the
1929    302 status code may be used instead, since most user agents react
1930    to a 302 response as described here for 303.
1931
1932    This behaviour can be overriden with CURLOPT_POSTREDIR
1933    */
1934    if((data->set.httpreq == HTTPREQ_POST
1935        || data->set.httpreq == HTTPREQ_POST_FORM)
1936       && !data->set.post302) {
1937      infof(data,
1938            "Violate RFC 2616/10.3.3 and switch from POST to GET\n");
1939      data->set.httpreq = HTTPREQ_GET;
1940    }
1941    break;
1942
1943  case 303: /* See Other */
1944    /* Disable both types of POSTs, since doing a second POST when
1945     * following isn't what anyone would want! */
1946    if(data->set.httpreq != HTTPREQ_GET) {
1947      data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1948      infof(data, "Disables POST, goes with %s\n",
1949            data->set.opt_no_body?"HEAD":"GET");
1950    }
1951    break;
1952  case 304: /* Not Modified */
1953    /* 304 means we did a conditional request and it was "Not modified".
1954     * We shouldn't get any Location: header in this response!
1955     */
1956    break;
1957  case 305: /* Use Proxy */
1958    /* (quote from RFC2616, section 10.3.6):
1959     * "The requested resource MUST be accessed through the proxy given
1960     * by the Location field. The Location field gives the URI of the
1961     * proxy.  The recipient is expected to repeat this single request
1962     * via the proxy. 305 responses MUST only be generated by origin
1963     * servers."
1964     */
1965    break;
1966  }
1967  Curl_pgrsTime(data, TIMER_REDIRECT);
1968  Curl_pgrsResetTimes(data);
1969
1970  return CURLE_OK;
1971#endif /* CURL_DISABLE_HTTP */
1972}
1973
1974static CURLcode
1975connect_host(struct SessionHandle *data,
1976             struct connectdata **conn)
1977{
1978  CURLcode res = CURLE_OK;
1979
1980  bool async;
1981  bool protocol_done=TRUE; /* will be TRUE always since this is only used
1982                                within the easy interface */
1983  Curl_pgrsTime(data, TIMER_STARTSINGLE);
1984  res = Curl_connect(data, conn, &async, &protocol_done);
1985
1986  if((CURLE_OK == res) && async) {
1987    /* Now, if async is TRUE here, we need to wait for the name
1988       to resolve */
1989    res = Curl_resolver_wait_resolv(*conn, NULL);
1990    if(CURLE_OK == res)
1991      /* Resolved, continue with the connection */
1992      res = Curl_async_resolved(*conn, &protocol_done);
1993    else
1994      /* if we can't resolve, we kill this "connection" now */
1995      (void)Curl_disconnect(*conn, /* dead_connection */ FALSE);
1996  }
1997
1998  return res;
1999}
2000
2001CURLcode
2002Curl_reconnect_request(struct connectdata **connp)
2003{
2004  CURLcode result = CURLE_OK;
2005  struct connectdata *conn = *connp;
2006  struct SessionHandle *data = conn->data;
2007
2008  /* This was a re-use of a connection and we got a write error in the
2009   * DO-phase. Then we DISCONNECT this connection and have another attempt to
2010   * CONNECT and then DO again! The retry cannot possibly find another
2011   * connection to re-use, since we only keep one possible connection for
2012   * each.  */
2013
2014  infof(data, "Re-used connection seems dead, get a new one\n");
2015
2016  conn->bits.close = TRUE; /* enforce close of this connection */
2017  result = Curl_done(&conn, result, FALSE); /* we are so done with this */
2018
2019  /* conn may no longer be a good pointer */
2020
2021  /*
2022   * According to bug report #1330310. We need to check for CURLE_SEND_ERROR
2023   * here as well. I figure this could happen when the request failed on a FTP
2024   * connection and thus Curl_done() itself tried to use the connection
2025   * (again). Slight Lack of feedback in the report, but I don't think this
2026   * extra check can do much harm.
2027   */
2028  if((CURLE_OK == result) || (CURLE_SEND_ERROR == result)) {
2029    bool async;
2030    bool protocol_done = TRUE;
2031
2032    /* Now, redo the connect and get a new connection */
2033    result = Curl_connect(data, connp, &async, &protocol_done);
2034    if(CURLE_OK == result) {
2035      /* We have connected or sent away a name resolve query fine */
2036
2037      conn = *connp; /* setup conn to again point to something nice */
2038      if(async) {
2039        /* Now, if async is TRUE here, we need to wait for the name
2040           to resolve */
2041        result = Curl_resolver_wait_resolv(conn, NULL);
2042        if(result)
2043          return result;
2044
2045        /* Resolved, continue with the connection */
2046        result = Curl_async_resolved(conn, &protocol_done);
2047        if(result)
2048          return result;
2049      }
2050    }
2051  }
2052
2053  return result;
2054}
2055
2056/* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
2057
2058   NOTE: that the *url is malloc()ed. */
2059CURLcode Curl_retry_request(struct connectdata *conn,
2060                            char **url)
2061{
2062  struct SessionHandle *data = conn->data;
2063
2064  *url = NULL;
2065
2066  /* if we're talking upload, we can't do the checks below, unless the protocol
2067     is HTTP as when uploading over HTTP we will still get a response */
2068  if(data->set.upload &&
2069     !(conn->handler->protocol&(CURLPROTO_HTTP|CURLPROTO_RTSP)))
2070    return CURLE_OK;
2071
2072  if(/* workaround for broken TLS servers */ data->state.ssl_connect_retry ||
2073      ((data->req.bytecount +
2074        data->req.headerbytecount == 0) &&
2075        conn->bits.reuse &&
2076        !data->set.opt_no_body &&
2077        data->set.rtspreq != RTSPREQ_RECEIVE)) {
2078    /* We got no data, we attempted to re-use a connection and yet we want a
2079       "body". This might happen if the connection was left alive when we were
2080       done using it before, but that was closed when we wanted to read from
2081       it again. Bad luck. Retry the same request on a fresh connect! */
2082    infof(conn->data, "Connection died, retrying a fresh connect\n");
2083    *url = strdup(conn->data->change.url);
2084    if(!*url)
2085      return CURLE_OUT_OF_MEMORY;
2086
2087    conn->bits.close = TRUE; /* close this connection */
2088    conn->bits.retry = TRUE; /* mark this as a connection we're about
2089                                to retry. Marking it this way should
2090                                prevent i.e HTTP transfers to return
2091                                error just because nothing has been
2092                                transferred! */
2093
2094    if(data->state.proto.http->writebytecount)
2095      Curl_readrewind(conn);
2096  }
2097  return CURLE_OK;
2098}
2099
/*
 * Curl_do_perform() runs a single easy-interface transfer: connect, DO,
 * transfer, then loop to follow redirects or retry on a fresh connection
 * until the transfer is finished. Curl_posttransfer() runs unconditionally
 * before returning, so control must always fall out of the for(;;) loop.
 */
static CURLcode Curl_do_perform(struct SessionHandle *data)
{
  CURLcode res;
  CURLcode res2;
  struct connectdata *conn=NULL;
  char *newurl = NULL; /* possibly a new URL to follow to! */
  followtype follow = FOLLOW_NONE;

  data->state.used_interface = Curl_if_easy;

  res = Curl_pretransfer(data);
  if(res)
    return res;

  /*
   * It is important that there is NO 'return' from this function at any other
   * place than falling down to the end of the function! This is because we
   * have cleanup stuff that must be done before we get back, and that is only
   * performed after this do-while loop.
   */

  for(;;) {
    res = connect_host(data, &conn);   /* primary connection */

    if(res == CURLE_OK) {
      bool do_done;
      if(data->set.connect_only) {
        /* keep connection open for application to use the socket */
        conn->bits.close = FALSE;
        res = Curl_done(&conn, CURLE_OK, FALSE);
        break;
      }
      res = Curl_do(&conn, &do_done);

      if(res == CURLE_OK) {
        if(conn->data->set.wildcardmatch) {
          if(conn->data->wildcard.state == CURLWC_DONE ||
             conn->data->wildcard.state == CURLWC_SKIP) {
            /* keep connection open for application to use the socket */
            conn->bits.close = FALSE;
            res = Curl_done(&conn, CURLE_OK, FALSE);
            break;
          }
        }
        res = Transfer(conn); /* now fetch that URL please */
        if((res == CURLE_OK) || (res == CURLE_RECV_ERROR)) {
          bool retry = FALSE;
          /* check if a retry on a fresh connection is warranted; on
             success, newurl is set (malloc()ed) when a retry is wanted */
          CURLcode rc = Curl_retry_request(conn, &newurl);
          if(rc)
            res = rc;
          else
            retry = (newurl?TRUE:FALSE);

          if(retry) {
            /* we know (newurl != NULL) at this point */
            res = CURLE_OK;
            follow = FOLLOW_RETRY;
          }
          else if(res == CURLE_OK) {
            /*
             * We must duplicate the new URL here as the connection data may
             * be free()ed in the Curl_done() function. We prefer the newurl
             * one since that's used for redirects or just further requests
             * for retries or multi-stage HTTP auth methods etc.
             */
            if(data->req.newurl) {
              follow = FOLLOW_REDIR;
              newurl = strdup(data->req.newurl);
              if(!newurl)
                res = CURLE_OUT_OF_MEMORY;
            }
            else if(data->req.location) {
              /* a Location: we were told about but told not to follow;
                 only record where we would have gone */
              follow = FOLLOW_FAKE;
              newurl = strdup(data->req.location);
              if(!newurl)
                res = CURLE_OUT_OF_MEMORY;
            }
          }

          /* in the above cases where 'newurl' gets assigned, we have a fresh
           * allocated memory pointed to */
        }
        if(res != CURLE_OK) {
          /* The transfer phase returned error, we mark the connection to get
           * closed to prevent being re-used. This is because we can't
           * possibly know if the connection is in a good shape or not now. */
          conn->bits.close = TRUE;

          if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET]) {
            /* if we failed anywhere, we must clean up the secondary socket if
               it was used */
            Curl_closesocket(conn, conn->sock[SECONDARYSOCKET]);
            conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD;
          }
        }

        /* Always run Curl_done(), even if some of the previous calls
           failed, but return the previous (original) error code */
        res2 = Curl_done(&conn, res, FALSE);

        if(CURLE_OK == res)
          res = res2;
      }
      else if(conn)
        /* Curl_do() failed, clean up left-overs in the done-call, but note
           that at some cases the conn pointer is NULL when Curl_do() failed
           and the connection cache is very small so only call Curl_done() if
           conn is still "alive". */
        /* ignore return code since we already have an error to return */
        (void)Curl_done(&conn, res, FALSE);

      /*
       * Important: 'conn' cannot be used here, since it may have been closed
       * in 'Curl_done' or other functions.
       */

      if((res == CURLE_OK) && follow) {
        res = Curl_follow(data, newurl, follow);
        if(CURLE_OK == res) {
          /* if things went fine, Curl_follow() freed or otherwise took
             responsibility for the newurl pointer */
          newurl = NULL;
          if(follow >= FOLLOW_RETRY) {
            /* a real retry or redirect: go around the loop again with the
               new URL; FOLLOW_FAKE falls through and ends the transfer */
            follow = FOLLOW_NONE;
            continue;
          }
          /* else we break out of the loop below */
        }
      }
    }
    break; /* it only reaches here when this shouldn't loop */

  } /* loop if Location: */

  if(newurl)
    free(newurl);

  if(res && !data->state.errorbuf) {
    /*
     * As an extra precaution: if no error string has been set and there was
     * an error, use the strerror() string or if things are so bad that not
     * even that is good, set a bad string that mentions the error code.
     */
    const char *str = curl_easy_strerror(res);
    if(!str)
      failf(data, "unspecified error %d", (int)res);
    else
      failf(data, "%s", str);
  }

  /* run post-transfer unconditionally, but don't clobber the return code if
     we already have an error code recorder */
  res2 = Curl_posttransfer(data);
  if(!res && res2)
    res = res2;

  return res;
}
2258
2259/*
2260 * Curl_perform() is the internal high-level function that gets called by the
2261 * external curl_easy_perform() function. It inits, performs and cleans up a
2262 * single file transfer.
2263 */
2264CURLcode Curl_perform(struct SessionHandle *data)
2265{
2266  CURLcode res;
2267  if(!data->set.wildcardmatch)
2268    return Curl_do_perform(data);
2269
2270  /* init main wildcard structures */
2271  res = Curl_wildcard_init(&data->wildcard);
2272  if(res)
2273    return res;
2274
2275  res = Curl_do_perform(data);
2276  if(res) {
2277    Curl_wildcard_dtor(&data->wildcard);
2278    return res;
2279  }
2280
2281  /* wildcard loop */
2282  while(!res && data->wildcard.state != CURLWC_DONE)
2283    res = Curl_do_perform(data);
2284
2285  Curl_wildcard_dtor(&data->wildcard);
2286
2287  /* wildcard download finished or failed */
2288  data->wildcard.state = CURLWC_INIT;
2289  return res;
2290}
2291
2292/*
2293 * Curl_setup_transfer() is called to setup some basic properties for the
2294 * upcoming transfer.
2295 */
2296void
2297Curl_setup_transfer(
2298  struct connectdata *conn, /* connection data */
2299  int sockindex,            /* socket index to read from or -1 */
2300  curl_off_t size,          /* -1 if unknown at this point */
2301  bool getheader,           /* TRUE if header parsing is wanted */
2302  curl_off_t *bytecountp,   /* return number of bytes read or NULL */
2303  int writesockindex,       /* socket index to write to, it may very well be
2304                               the same we read from. -1 disables */
2305  curl_off_t *writecountp   /* return number of bytes written or NULL */
2306  )
2307{
2308  struct SessionHandle *data;
2309  struct SingleRequest *k;
2310
2311  DEBUGASSERT(conn != NULL);
2312
2313  data = conn->data;
2314  k = &data->req;
2315
2316  DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
2317
2318  /* now copy all input parameters */
2319  conn->sockfd = sockindex == -1 ?
2320      CURL_SOCKET_BAD : conn->sock[sockindex];
2321  conn->writesockfd = writesockindex == -1 ?
2322      CURL_SOCKET_BAD:conn->sock[writesockindex];
2323  k->getheader = getheader;
2324
2325  k->size = size;
2326  k->bytecountp = bytecountp;
2327  k->writebytecountp = writecountp;
2328
2329  /* The code sequence below is placed in this function just because all
2330     necessary input is not always known in do_complete() as this function may
2331     be called after that */
2332
2333  if(!k->getheader) {
2334    k->header = FALSE;
2335    if(size > 0)
2336      Curl_pgrsSetDownloadSize(data, size);
2337  }
2338  /* we want header and/or body, if neither then don't do this! */
2339  if(k->getheader || !data->set.opt_no_body) {
2340
2341    if(conn->sockfd != CURL_SOCKET_BAD)
2342      k->keepon |= KEEP_RECV;
2343
2344    if(conn->writesockfd != CURL_SOCKET_BAD) {
2345      /* HTTP 1.1 magic:
2346
2347         Even if we require a 100-return code before uploading data, we might
2348         need to write data before that since the REQUEST may not have been
2349         finished sent off just yet.
2350
2351         Thus, we must check if the request has been sent before we set the
2352         state info where we wait for the 100-return code
2353      */
2354      if((data->state.expect100header) &&
2355         (data->state.proto.http->sending == HTTPSEND_BODY)) {
2356        /* wait with write until we either got 100-continue or a timeout */
2357        k->exp100 = EXP100_AWAITING_CONTINUE;
2358        k->start100 = k->start;
2359
2360        /* set a timeout for the multi interface */
2361        Curl_expire(data, CURL_TIMEOUT_EXPECT_100);
2362      }
2363      else {
2364        if(data->state.expect100header)
2365          /* when we've sent off the rest of the headers, we must await a
2366             100-continue but first finish sending the request */
2367          k->exp100 = EXP100_SENDING_REQUEST;
2368
2369        /* enable the write bit when we're not waiting for continue */
2370        k->keepon |= KEEP_SEND;
2371      }
2372    } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
2373  } /* if(k->getheader || !data->set.opt_no_body) */
2374
2375}
2376