about | summary | refs | log | tree | commit | diff
path: root/lib/transfer.c
diff options
context:
space:
mode:
Diffstat (limited to 'lib/transfer.c')
-rw-r--r--  lib/transfer.c  1462
1 files changed, 759 insertions, 703 deletions
diff --git a/lib/transfer.c b/lib/transfer.c
index 7c73c11b0..159151f59 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -106,6 +106,13 @@
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
+enum {
+ KEEP_NONE,
+ KEEP_READ,
+ KEEP_WRITE
+};
+
+
/*
* compareheader()
*
@@ -161,752 +168,796 @@ compareheader(char *headerline, /* line to check */
return FALSE; /* no match */
}
-/*
- * Transfer()
- *
- * This function is what performs the actual transfer. It is capable of
- * doing both ways simultaneously.
- * The transfer must already have been setup by a call to Curl_Transfer().
- *
- * Note that headers are created in a preallocated buffer of a default size.
- * That buffer can be enlarged on demand, but it is never shrinken again.
- *
- * Parts of this function was once written by the friendly Mark Butler
- * <butlerm@xmission.com>.
- */
-
-static CURLcode
-Transfer(struct connectdata *c_conn)
+CURLcode Curl_readwrite(struct connectdata *conn,
+ bool *done)
{
- ssize_t nread; /* number of bytes read */
- int bytecount = 0; /* total number of bytes read */
- int writebytecount = 0; /* number of bytes written */
- long contentlength=0; /* size of incoming data */
- struct timeval start = Curl_tvnow();
- struct timeval now = start; /* current time */
- bool header = TRUE; /* incoming data has HTTP header */
- int headerline = 0; /* counts header lines to better track the
- first one */
- char *hbufp; /* points at *end* of header line */
- int hbuflen = 0;
- char *str; /* within buf */
- char *str_start; /* within buf */
- char *end_ptr; /* within buf */
- char *p; /* within headerbuff */
- bool content_range = FALSE; /* set TRUE if Content-Range: was found */
- int offset = 0; /* possible resume offset read from the
- Content-Range: header */
- int httpcode = 0; /* error code from the 'HTTP/1.? XXX' line */
- int httpversion = -1; /* the HTTP version*10 */
- bool write_after_100_header = FALSE; /* should we enable the write after
- we received a 100-continue/timeout
- or directly */
-
- /* for the low speed checks: */
- CURLcode urg;
- time_t timeofdoc=0;
- long bodywrites=0;
- int writetype;
-
- /* the highest fd we use + 1 */
- struct SessionHandle *data;
- struct connectdata *conn = (struct connectdata *)c_conn;
- char *buf;
- int maxfd;
+ struct Curl_transfer_keeper *k = &conn->keep;
+ struct SessionHandle *data = conn->data;
+ CURLcode result;
+ ssize_t nread; /* number of bytes read */
+ int didwhat=0;
- data = conn->data; /* there's the root struct */
- buf = data->state.buffer;
- maxfd = (conn->sockfd>conn->writesockfd?conn->sockfd:conn->writesockfd)+1;
-
- hbufp = data->state.headerbuff;
+ do {
+ if((k->keepon & KEEP_READ) &&
+ FD_ISSET(conn->sockfd, &k->readfd)) {
- now = Curl_tvnow();
- start = now;
+ if ((k->bytecount == 0) && (k->writebytecount == 0))
+ Curl_pgrsTime(data, TIMER_STARTTRANSFER);
-#define KEEP_READ 1
-#define KEEP_WRITE 2
+ didwhat |= KEEP_READ;
- Curl_pgrsTime(data, TIMER_PRETRANSFER);
- Curl_speedinit(data);
-
- if((conn->sockfd == -1) &&
- (conn->writesockfd == -1)) {
- /* nothing to read, nothing to write, we're already OK! */
- return CURLE_OK;
- }
+ /* read! */
+ result = Curl_read(conn, conn->sockfd, k->buf,
+ BUFSIZE -1, &nread);
- if (!conn->getheader) {
- header = FALSE;
- if(conn->size > 0)
- Curl_pgrsSetDownloadSize(data, conn->size);
- }
- /* we want header and/or body, if neither then don't do this! */
- if(conn->getheader ||
- !data->set.no_body) {
- fd_set readfd;
- fd_set writefd;
- fd_set rkeepfd;
- fd_set wkeepfd;
- struct timeval interval;
- int keepon=0;
-
- /* timeout every X second
- - makes a better progress meter (i.e even when no data is read, the
- meter can be updated and reflect reality)
- - allows removal of the alarm() crap
- - variable timeout is easier
- */
+ if(result)
+ return result;
- FD_ZERO (&readfd); /* clear it */
- if(conn->sockfd != -1) {
- FD_SET (conn->sockfd, &readfd); /* read socket */
- keepon |= KEEP_READ;
- }
+ /* NULL terminate, allowing string ops to be used */
+ if (0 < (signed int) nread)
+ k->buf[nread] = 0;
- FD_ZERO (&writefd); /* clear it */
- if(conn->writesockfd != -1) {
- if (data->set.expect100header)
- /* wait with write until we either got 100-continue or a timeout */
- write_after_100_header = TRUE;
- else {
- FD_SET (conn->writesockfd, &writefd); /* write socket */
- keepon |= KEEP_WRITE;
+ /* if we receive 0 or less here, the server closed the connection and
+ we bail out from this! */
+ else if (0 >= (signed int) nread) {
+ k->keepon &= ~KEEP_READ;
+ FD_ZERO(&k->rkeepfd);
+ break;
}
- }
- /* get these in backup variables to be able to restore them on each lap in
- the select() loop */
- rkeepfd = readfd;
- wkeepfd = writefd;
+ /* Default buffer to use when we write the buffer, it may be changed
+ in the flow below before the actual storing is done. */
+ k->str = k->buf;
- while (keepon) {
- readfd = rkeepfd; /* set those every lap in the loop */
- writefd = wkeepfd;
- interval.tv_sec = 1;
- interval.tv_usec = 0;
+ /* Since this is a two-state thing, we check if we are parsing
+ headers at the moment or not. */
+ if (k->header) {
+ /* we are in parse-the-header-mode */
- switch (select (maxfd, &readfd, &writefd, NULL, &interval)) {
- case -1: /* select() error, stop reading */
-#ifdef EINTR
- /* The EINTR is not serious, and it seems you might get this more
- ofen when using the lib in a multi-threaded environment! */
- if(errno == EINTR)
- ;
- else
-#endif
- keepon = 0; /* no more read or write */
- continue;
- case 0: /* timeout */
- if (write_after_100_header) {
- write_after_100_header = FALSE;
- FD_SET (conn->writesockfd, &writefd); /* write socket */
- keepon |= KEEP_WRITE;
- wkeepfd = writefd;
- }
- break;
- default:
- if ((bytecount == 0) && (writebytecount == 0))
- Curl_pgrsTime(data, TIMER_STARTTRANSFER);
- if((keepon & KEEP_READ) && FD_ISSET(conn->sockfd, &readfd)) {
- /* read! */
- urg = Curl_read(conn, conn->sockfd, buf, BUFSIZE -1, &nread);
-
- /* NULL terminate, allowing string ops to be used */
- if (0 < (signed int) nread)
- buf[nread] = 0;
-
- /* if we receive 0 or less here, the server closed the connection and
- we bail out from this! */
- else if (0 >= (signed int) nread) {
- keepon &= ~KEEP_READ;
- FD_ZERO(&rkeepfd);
- break;
+ /* header line within buffer loop */
+ do {
+ int hbufp_index;
+
+ /* str_start is start of line within buf */
+ k->str_start = k->str;
+
+ k->end_ptr = strchr (k->str_start, '\n');
+
+ if (!k->end_ptr) {
+ /* no more complete header lines within buffer */
+ /* copy what is remaining into headerbuff */
+ int str_length = (int)strlen(k->str);
+
+ /*
+ * We enlarge the header buffer if it seems to be too
+ * smallish
+ */
+ if (k->hbuflen + (int)str_length >=
+ data->state.headersize) {
+ char *newbuff;
+ long newsize=MAX((k->hbuflen+str_length)*3/2,
+ data->state.headersize*2);
+ hbufp_index = k->hbufp - data->state.headerbuff;
+ newbuff = (char *)realloc(data->state.headerbuff, newsize);
+ if(!newbuff) {
+ failf (data, "Failed to alloc memory for big header!");
+ return CURLE_READ_ERROR;
+ }
+ data->state.headersize=newsize;
+ data->state.headerbuff = newbuff;
+ k->hbufp = data->state.headerbuff + hbufp_index;
+ }
+ strcpy (k->hbufp, k->str);
+ k->hbufp += strlen (k->str);
+ k->hbuflen += strlen (k->str);
+ break; /* read more and try again */
}
- str = buf; /* Default buffer to use when we write the
- buffer, it may be changed in the flow below
- before the actual storing is done. */
+ k->str = k->end_ptr + 1; /* move past new line */
- /* Since this is a two-state thing, we check if we are parsing
- headers at the moment or not. */
-
- if (header) {
- /* we are in parse-the-header-mode */
+ /*
+ * We're about to copy a chunk of data to the end of the
+ * already received header. We make sure that the full string
+ * fit in the allocated header buffer, or else we enlarge
+ * it.
+ */
+ if (k->hbuflen + (k->str - k->str_start) >=
+ data->state.headersize) {
+ char *newbuff;
+ long newsize=MAX((k->hbuflen+
+ (k->str-k->str_start))*3/2,
+ data->state.headersize*2);
+ hbufp_index = k->hbufp - data->state.headerbuff;
+ newbuff = (char *)realloc(data->state.headerbuff, newsize);
+ if(!newbuff) {
+ failf (data, "Failed to alloc memory for big header!");
+ return CURLE_READ_ERROR;
+ }
+ data->state.headersize= newsize;
+ data->state.headerbuff = newbuff;
+ k->hbufp = data->state.headerbuff + hbufp_index;
+ }
- /* header line within buffer loop */
- do {
- int hbufp_index;
+ /* copy to end of line */
+ strncpy (k->hbufp, k->str_start, k->str - k->str_start);
+ k->hbufp += k->str - k->str_start;
+ k->hbuflen += k->str - k->str_start;
+ *k->hbufp = 0;
- str_start = str; /* str_start is start of line within buf */
+ k->p = data->state.headerbuff;
- end_ptr = strchr (str_start, '\n');
-
- if (!end_ptr) {
- /* no more complete header lines within buffer */
- /* copy what is remaining into headerbuff */
- int str_length = (int)strlen(str);
-
- /*
- * We enlarge the header buffer if it seems to be too
- * smallish
- */
- if (hbuflen + (int)str_length >= data->state.headersize) {
- char *newbuff;
- long newsize=MAX((hbuflen+str_length)*3/2,
- data->state.headersize*2);
- hbufp_index = hbufp - data->state.headerbuff;
- newbuff = (char *)realloc(data->state.headerbuff, newsize);
- if(!newbuff) {
- failf (data, "Failed to alloc memory for big header!");
- return CURLE_READ_ERROR;
- }
- data->state.headersize=newsize;
- data->state.headerbuff = newbuff;
- hbufp = data->state.headerbuff + hbufp_index;
- }
- strcpy (hbufp, str);
- hbufp += strlen (str);
- hbuflen += strlen (str);
- break; /* read more and try again */
- }
+ /****
+ * We now have a FULL header line that p points to
+ *****/
- str = end_ptr + 1; /* move just past new line */
+ if (('\n' == *k->p) || ('\r' == *k->p)) {
+ /* Zero-length header line means end of headers! */
+ if ('\r' == *k->p)
+ k->p++; /* pass the \r byte */
+ if ('\n' == *k->p)
+ k->p++; /* pass the \n byte */
+
+ if(100 == k->httpcode) {
/*
- * We're about to copy a chunk of data to the end of the
- * already received header. We make sure that the full string
- * fit in the allocated header buffer, or else we enlarge
- * it.
+ * we have made a HTTP PUT or POST and this is 1.1-lingo
+ * that tells us that the server is OK with this and ready
+ * to receive our stuff.
+ * However, we'll get more headers now so we must get
+ * back into the header-parsing state!
*/
- if (hbuflen + (str - str_start) >= data->state.headersize) {
- char *newbuff;
- long newsize=MAX((hbuflen+(str-str_start))*3/2,
- data->state.headersize*2);
- hbufp_index = hbufp - data->state.headerbuff;
- newbuff = (char *)realloc(data->state.headerbuff, newsize);
- if(!newbuff) {
- failf (data, "Failed to alloc memory for big header!");
- return CURLE_READ_ERROR;
- }
- data->state.headersize= newsize;
- data->state.headerbuff = newbuff;
- hbufp = data->state.headerbuff + hbufp_index;
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
+ /* if we did wait for this do enable write now! */
+ if (k->write_after_100_header) {
+ k->write_after_100_header = FALSE;
+ FD_SET (conn->writesockfd, &k->writefd); /* write */
+ k->keepon |= KEEP_WRITE;
+ k->wkeepfd = k->writefd;
}
+ }
+ else
+ k->header = FALSE; /* no more header to parse! */
- /* copy to end of line */
- strncpy (hbufp, str_start, str - str_start);
- hbufp += str - str_start;
- hbuflen += str - str_start;
- *hbufp = 0;
-
- p = data->state.headerbuff;
-
- /****
- * We now have a FULL header line that p points to
- *****/
-
- if (('\n' == *p) || ('\r' == *p)) {
- /* Zero-length header line means end of headers! */
-
- if ('\r' == *p)
- p++; /* pass the \r byte */
- if ('\n' == *p)
- p++; /* pass the \n byte */
-
- if(100 == httpcode) {
- /*
- * we have made a HTTP PUT or POST and this is 1.1-lingo
- * that tells us that the server is OK with this and ready
- * to receive our stuff.
- * However, we'll get more headers now so we must get
- * back into the header-parsing state!
- */
- header = TRUE;
- headerline = 0; /* we restart the header line counter */
- /* if we did wait for this do enable write now! */
- if (write_after_100_header) {
- write_after_100_header = FALSE;
- FD_SET (conn->writesockfd, &writefd); /* write socket */
- keepon |= KEEP_WRITE;
- wkeepfd = writefd;
- }
- }
- else
- header = FALSE; /* no more header to parse! */
-
- if (417 == httpcode) {
- /*
- * we got: "417 Expectation Failed" this means:
- * we have made a HTTP call and our Expect Header
- * seems to cause a problem => abort the write operations
- * (or prevent them from starting
- */
- write_after_100_header = FALSE;
- keepon &= ~KEEP_WRITE;
- FD_ZERO(&wkeepfd);
- }
-
- /* now, only output this if the header AND body are requested:
- */
- writetype = CLIENTWRITE_HEADER;
- if (data->set.http_include_header)
- writetype |= CLIENTWRITE_BODY;
-
- urg = Curl_client_write(data, writetype,
- data->state.headerbuff,
- p - data->state.headerbuff);
- if(urg)
- return urg;
-
- data->info.header_size += p - data->state.headerbuff;
- conn->headerbytecount += p - data->state.headerbuff;
-
- if(!header) {
- /*
- * really end-of-headers.
- *
- * If we requested a "no body", this is a good time to get
- * out and return home.
- */
- if(data->set.no_body)
- return CURLE_OK;
+ if (417 == k->httpcode) {
+ /*
+ * we got: "417 Expectation Failed" this means:
+ * we have made a HTTP call and our Expect Header
+ * seems to cause a problem => abort the write operations
+               * (or prevent them from starting)
+ */
+ k->write_after_100_header = FALSE;
+ k->keepon &= ~KEEP_WRITE;
+ FD_ZERO(&k->wkeepfd);
+ }
- if(!conn->bits.close) {
- /* If this is not the last request before a close, we must
- set the maximum download size to the size of the
- expected document or else, we won't know when to stop
- reading! */
- if(-1 != conn->size)
- conn->maxdownload = conn->size;
-
- /* If max download size is *zero* (nothing) we already
- have nothing and can safely return ok now! */
- if(0 == conn->maxdownload)
- return CURLE_OK;
-
- /* What to do if the size is *not* known? */
- }
- break; /* exit header line loop */
- }
-
- /* We continue reading headers, so reset the line-based
- header parsing variables hbufp && hbuflen */
- hbufp = data->state.headerbuff;
- hbuflen = 0;
- continue;
- }
+ /* now, only output this if the header AND body are requested:
+ */
+ k->writetype = CLIENTWRITE_HEADER;
+ if (data->set.http_include_header)
+ k->writetype |= CLIENTWRITE_BODY;
+
+ result = Curl_client_write(data, k->writetype,
+ data->state.headerbuff,
+ k->p - data->state.headerbuff);
+ if(result)
+ return result;
+
+ data->info.header_size += k->p - data->state.headerbuff;
+ conn->headerbytecount += k->p - data->state.headerbuff;
+ if(!k->header) {
/*
- * Checks for special headers coming up.
+ * really end-of-headers.
+ *
+ * If we requested a "no body", this is a good time to get
+ * out and return home.
*/
+ if(data->set.no_body)
+ return CURLE_OK;
+
+ if(!conn->bits.close) {
+ /* If this is not the last request before a close, we must
+ set the maximum download size to the size of the
+ expected document or else, we won't know when to stop
+ reading! */
+ if(-1 != conn->size)
+ conn->maxdownload = conn->size;
+
+ /* If max download size is *zero* (nothing) we already
+ have nothing and can safely return ok now! */
+ if(0 == conn->maxdownload)
+ return CURLE_OK;
+
+ /* What to do if the size is *not* known? */
+ }
+ break; /* exit header line loop */
+ }
+
+ /* We continue reading headers, so reset the line-based
+ header parsing variables hbufp && hbuflen */
+ k->hbufp = data->state.headerbuff;
+ k->hbuflen = 0;
+ continue;
+ }
+
+ /*
+ * Checks for special headers coming up.
+ */
- if (!headerline++) {
- /* This is the first header, it MUST be the error code line
- or else we consiser this to be the body right away! */
- int httpversion_major;
- int nc=sscanf (p, " HTTP/%d.%d %3d",
- &httpversion_major ,&httpversion, &httpcode);
- if (nc==3) {
- httpversion+=10*httpversion_major;
- }
- else {
- /* this is the real world, not a Nirvana
- NCSA 1.5.x returns this crap when asked for HTTP/1.1
- */
- nc=sscanf (p, " HTTP %3d", &httpcode);
- httpversion = 10;
- }
-
- if (nc) {
- data->info.httpcode = httpcode;
- data->info.httpversion = httpversion;
-
- /* 404 -> URL not found! */
- if (data->set.http_fail_on_error &&
- (httpcode >= 400)) {
- /* If we have been told to fail hard on HTTP-errors,
- here is the check for that: */
- /* serious error, go home! */
- failf (data, "The requested file was not found");
- return CURLE_HTTP_NOT_FOUND;
- }
+ if (!k->headerline++) {
+ /* This is the first header, it MUST be the error code line
               or else we consider this to be the body right away! */
+ int httpversion_major;
+ int nc=sscanf (k->p, " HTTP/%d.%d %3d",
+ &httpversion_major,
+ &k->httpversion,
+ &k->httpcode);
+ if (nc==3) {
+ k->httpversion += 10 * httpversion_major;
+ }
+ else {
+ /* this is the real world, not a Nirvana
+ NCSA 1.5.x returns this crap when asked for HTTP/1.1
+ */
+ nc=sscanf (k->p, " HTTP %3d", &k->httpcode);
+ k->httpversion = 10;
+ }
- if(httpversion == 10)
- /* Default action for HTTP/1.0 must be to close, unless
- we get one of those fancy headers that tell us the
- server keeps it open for us! */
- conn->bits.close = TRUE;
-
- if (httpcode == 304)
- /* (quote from RFC2616, section 10.3.5):
- * The 304 response MUST NOT contain a
- * message-body, and thus is always
- * terminated by the first empty line
- * after the header fields.
- */
- conn->size=0;
- }
- else {
- header = FALSE; /* this is not a header line */
- break;
- }
- }
- /* check for Content-Length: header lines to get size */
- if (strnequal("Content-Length:", p, 15) &&
- sscanf (p+15, " %ld", &contentlength)) {
- conn->size = contentlength;
- Curl_pgrsSetDownloadSize(data, contentlength);
+ if (nc) {
+ data->info.httpcode = k->httpcode;
+ data->info.httpversion = k->httpversion;
+
+ /* 404 -> URL not found! */
+ if (data->set.http_fail_on_error &&
+ (k->httpcode >= 400)) {
+ /* If we have been told to fail hard on HTTP-errors,
+ here is the check for that: */
+ /* serious error, go home! */
+ failf (data, "The requested file was not found");
+ return CURLE_HTTP_NOT_FOUND;
}
- else if((httpversion == 10) &&
- conn->bits.httpproxy &&
- compareheader(p, "Proxy-Connection:", "keep-alive")) {
- /*
- * When a HTTP/1.0 reply comes when using a proxy, the
- * 'Proxy-Connection: keep-alive' line tells us the
- * connection will be kept alive for our pleasure.
- * Default action for 1.0 is to close.
- */
- conn->bits.close = FALSE; /* don't close when done */
- infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
- }
- else if((httpversion == 10) &&
- compareheader(p, "Connection:", "keep-alive")) {
- /*
- * A HTTP/1.0 reply with the 'Connection: keep-alive' line
- * tells us the connection will be kept alive for our
- * pleasure. Default action for 1.0 is to close.
- *
- * [RFC2068, section 19.7.1] */
- conn->bits.close = FALSE; /* don't close when done */
- infof(data, "HTTP/1.0 connection set to keep alive!\n");
- }
- else if (compareheader(p, "Connection:", "close")) {
- /*
- * [RFC 2616, section 8.1.2.1]
- * "Connection: close" is HTTP/1.1 language and means that
- * the connection will close when this request has been
- * served.
+
+ if(k->httpversion == 10)
+ /* Default action for HTTP/1.0 must be to close, unless
+ we get one of those fancy headers that tell us the
+ server keeps it open for us! */
+ conn->bits.close = TRUE;
+
+ if (k->httpcode == 304)
+ /* (quote from RFC2616, section 10.3.5):
+ * The 304 response MUST NOT contain a
+ * message-body, and thus is always
+ * terminated by the first empty line
+ * after the header fields.
*/
- conn->bits.close = TRUE; /* close when done */
- }
- else if (compareheader(p, "Transfer-Encoding:", "chunked")) {
- /*
- * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
- * means that the server will send a series of "chunks". Each
- * chunk starts with line with info (including size of the
- * coming block) (terminated with CRLF), then a block of data
- * with the previously mentioned size. There can be any amount
- * of chunks, and a chunk-data set to zero signals the
- * end-of-chunks. */
- conn->bits.chunk = TRUE; /* chunks coming our way */
-
- /* init our chunky engine */
- Curl_httpchunk_init(conn);
- }
- else if (strnequal("Content-Range:", p, 14)) {
- if (sscanf (p+14, " bytes %d-", &offset) ||
- sscanf (p+14, " bytes: %d-", &offset)) {
- /* This second format was added August 1st 2000 by Igor
- Khristophorov since Sun's webserver JavaWebServer/1.1.1
- obviously sends the header this way! :-( */
- if (conn->resume_from == offset) {
- /* we asked for a resume and we got it */
- content_range = TRUE;
- }
- }
- }
- else if(data->cookies &&
- strnequal("Set-Cookie:", p, 11)) {
- Curl_cookie_add(data->cookies, TRUE, &p[12], conn->name);
- }
- else if(strnequal("Last-Modified:", p,
- strlen("Last-Modified:")) &&
- (data->set.timecondition || data->set.get_filetime) ) {
- time_t secs=time(NULL);
- timeofdoc = curl_getdate(p+strlen("Last-Modified:"), &secs);
- if(data->set.get_filetime>=0)
- data->info.filetime = timeofdoc;
- }
- else if ((httpcode >= 300 && httpcode < 400) &&
- (data->set.http_follow_location) &&
- strnequal("Location:", p, 9)) {
- /* this is the URL that the server advices us to get instead */
- char *ptr;
- char *start=p;
- char backup;
-
- start += 9; /* pass "Location:" */
-
- /* Skip spaces and tabs. We do this to support multiple
- white spaces after the "Location:" keyword. */
- while(*start && isspace((int)*start ))
- start++;
- ptr = start; /* start scanning here */
-
- /* scan through the string to find the end */
- while(*ptr && !isspace((int)*ptr))
- ptr++;
- backup = *ptr; /* store the ending letter */
- *ptr = '\0'; /* zero terminate */
- conn->newurl = strdup(start); /* clone string */
- *ptr = backup; /* restore ending letter */
+ conn->size=0;
+ }
+ else {
+ k->header = FALSE; /* this is not a header line */
+ break;
+ }
+ }
+ /* check for Content-Length: header lines to get size */
+ if (strnequal("Content-Length:", k->p, 15) &&
+ sscanf (k->p+15, " %ld", &k->contentlength)) {
+ conn->size = k->contentlength;
+ Curl_pgrsSetDownloadSize(data, k->contentlength);
+ }
+ else if((k->httpversion == 10) &&
+ conn->bits.httpproxy &&
+ compareheader(k->p, "Proxy-Connection:", "keep-alive")) {
+ /*
+ * When a HTTP/1.0 reply comes when using a proxy, the
+ * 'Proxy-Connection: keep-alive' line tells us the
+ * connection will be kept alive for our pleasure.
+ * Default action for 1.0 is to close.
+ */
+ conn->bits.close = FALSE; /* don't close when done */
+ infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
+ }
+ else if((k->httpversion == 10) &&
+ compareheader(k->p, "Connection:", "keep-alive")) {
+ /*
+ * A HTTP/1.0 reply with the 'Connection: keep-alive' line
+ * tells us the connection will be kept alive for our
+ * pleasure. Default action for 1.0 is to close.
+ *
+ * [RFC2068, section 19.7.1] */
+ conn->bits.close = FALSE; /* don't close when done */
+ infof(data, "HTTP/1.0 connection set to keep alive!\n");
+ }
+ else if (compareheader(k->p, "Connection:", "close")) {
+ /*
+ * [RFC 2616, section 8.1.2.1]
+ * "Connection: close" is HTTP/1.1 language and means that
+ * the connection will close when this request has been
+ * served.
+ */
+ conn->bits.close = TRUE; /* close when done */
+ }
+ else if (compareheader(k->p, "Transfer-Encoding:", "chunked")) {
+ /*
+ * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
+ * means that the server will send a series of "chunks". Each
+ * chunk starts with line with info (including size of the
+ * coming block) (terminated with CRLF), then a block of data
+ * with the previously mentioned size. There can be any amount
+ * of chunks, and a chunk-data set to zero signals the
+ * end-of-chunks. */
+ conn->bits.chunk = TRUE; /* chunks coming our way */
+
+ /* init our chunky engine */
+ Curl_httpchunk_init(conn);
+ }
+ else if (strnequal("Content-Range:", k->p, 14)) {
+ if (sscanf (k->p+14, " bytes %d-", &k->offset) ||
+ sscanf (k->p+14, " bytes: %d-", &k->offset)) {
+ /* This second format was added August 1st 2000 by Igor
+ Khristophorov since Sun's webserver JavaWebServer/1.1.1
+ obviously sends the header this way! :-( */
+ if (conn->resume_from == k->offset) {
+ /* we asked for a resume and we got it */
+ k->content_range = TRUE;
}
+ }
+ }
+ else if(data->cookies &&
+ strnequal("Set-Cookie:", k->p, 11)) {
+ Curl_cookie_add(data->cookies, TRUE, k->p+12, conn->name);
+ }
+ else if(strnequal("Last-Modified:", k->p,
+ strlen("Last-Modified:")) &&
+ (data->set.timecondition || data->set.get_filetime) ) {
+ time_t secs=time(NULL);
+ k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
+ &secs);
+ if(data->set.get_filetime>=0)
+ data->info.filetime = k->timeofdoc;
+ }
+ else if ((k->httpcode >= 300 && k->httpcode < 400) &&
+ (data->set.http_follow_location) &&
+ strnequal("Location:", k->p, 9)) {
+             /* this is the URL that the server advises us to get instead */
+ char *ptr;
+ char *start=k->p;
+ char backup;
+
+ start += 9; /* pass "Location:" */
+
+ /* Skip spaces and tabs. We do this to support multiple
+ white spaces after the "Location:" keyword. */
+ while(*start && isspace((int)*start ))
+ start++;
+ ptr = start; /* start scanning here */
+
+ /* scan through the string to find the end */
+ while(*ptr && !isspace((int)*ptr))
+ ptr++;
+ backup = *ptr; /* store the ending letter */
+ *ptr = '\0'; /* zero terminate */
+ conn->newurl = strdup(start); /* clone string */
+ *ptr = backup; /* restore ending letter */
+ }
- /*
- * End of header-checks. Write them to the client.
- */
+ /*
+ * End of header-checks. Write them to the client.
+ */
- writetype = CLIENTWRITE_HEADER;
- if (data->set.http_include_header)
- writetype |= CLIENTWRITE_BODY;
+ k->writetype = CLIENTWRITE_HEADER;
+ if (data->set.http_include_header)
+ k->writetype |= CLIENTWRITE_BODY;
- urg = Curl_client_write(data, writetype, p, hbuflen);
- if(urg)
- return urg;
+ result = Curl_client_write(data, k->writetype, k->p,
+ k->hbuflen);
+ if(result)
+ return result;
- data->info.header_size += hbuflen;
+ data->info.header_size += k->hbuflen;
- /* reset hbufp pointer && hbuflen */
- hbufp = data->state.headerbuff;
- hbuflen = 0;
- }
- while (*str); /* header line within buffer */
+ /* reset hbufp pointer && hbuflen */
+ k->hbufp = data->state.headerbuff;
+ k->hbuflen = 0;
+ }
+ while (*k->str); /* header line within buffer */
- /* We might have reached the end of the header part here, but
- there might be a non-header part left in the end of the read
- buffer. */
+ /* We might have reached the end of the header part here, but
+ there might be a non-header part left in the end of the read
+ buffer. */
- if (!header) {
- /* the next token and forward is not part of
- the header! */
+ if (!k->header) {
+ /* the next token and forward is not part of
+ the header! */
- /* we subtract the remaining header size from the buffer */
- nread -= (str - buf);
- }
+ /* we subtract the remaining header size from the buffer */
+ nread -= (k->str - k->buf);
+ }
- } /* end if header mode */
+ } /* end if header mode */
- /* This is not an 'else if' since it may be a rest from the header
- parsing, where the beginning of the buffer is headers and the end
- is non-headers. */
- if (str && !header && ((signed int)nread > 0)) {
+ /* This is not an 'else if' since it may be a rest from the header
+ parsing, where the beginning of the buffer is headers and the end
+ is non-headers. */
+ if (k->str && !k->header && ((signed int)nread > 0)) {
- if(0 == bodywrites) {
- /* These checks are only made the first time we are about to
- write a piece of the body */
- if(conn->protocol&PROT_HTTP) {
- /* HTTP-only checks */
- if (conn->newurl) {
- /* abort after the headers if "follow Location" is set */
- infof (data, "Follow to new URL: %s\n", conn->newurl);
- return CURLE_OK;
- }
- else if (conn->resume_from &&
- !content_range &&
- (data->set.httpreq==HTTPREQ_GET)) {
- /* we wanted to resume a download, although the server
- doesn't seem to support this and we did this with a GET
- (if it wasn't a GET we did a POST or PUT resume) */
- failf (data, "HTTP server doesn't seem to support "
- "byte ranges. Cannot resume.");
- return CURLE_HTTP_RANGE_ERROR;
- }
- else if(data->set.timecondition && !conn->range) {
- /* A time condition has been set AND no ranges have been
- requested. This seems to be what chapter 13.3.4 of
- RFC 2616 defines to be the correct action for a
- HTTP/1.1 client */
- if((timeofdoc > 0) && (data->set.timevalue > 0)) {
- switch(data->set.timecondition) {
- case TIMECOND_IFMODSINCE:
- default:
- if(timeofdoc < data->set.timevalue) {
- infof(data,
- "The requested document is not new enough\n");
- return CURLE_OK;
- }
- break;
- case TIMECOND_IFUNMODSINCE:
- if(timeofdoc > data->set.timevalue) {
- infof(data,
- "The requested document is not old enough\n");
- return CURLE_OK;
- }
- break;
- } /* switch */
- } /* two valid time strings */
- } /* we have a time condition */
-
- } /* this is HTTP */
- } /* this is the first time we write a body part */
- bodywrites++;
-
- if(conn->bits.chunk) {
- /*
- * Bless me father for I have sinned. Here comes a chunked
- * transfer flying and we need to decode this properly. While
- * the name says read, this function both reads and writes away
- * the data. The returned 'nread' holds the number of actual
- * data it wrote to the client. */
- CHUNKcode res =
- Curl_httpchunk_read(conn, str, nread, &nread);
-
- if(CHUNKE_OK < res) {
- failf(data, "Receeived problem in the chunky parser");
- return CURLE_READ_ERROR;
- }
- else if(CHUNKE_STOP == res) {
- /* we're done reading chunks! */
- keepon &= ~KEEP_READ; /* read no more */
- FD_ZERO(&rkeepfd);
-
- /* There are now possibly N number of bytes at the end of the
- str buffer that weren't written to the client, but we don't
- care about them right now. */
- }
- /* If it returned OK, we just keep going */
+ if(0 == k->bodywrites) {
+ /* These checks are only made the first time we are about to
+ write a piece of the body */
+ if(conn->protocol&PROT_HTTP) {
+ /* HTTP-only checks */
+ if (conn->newurl) {
+ /* abort after the headers if "follow Location" is set */
+ infof (data, "Follow to new URL: %s\n", conn->newurl);
+ return CURLE_OK;
}
+ else if (conn->resume_from &&
+ !k->content_range &&
+ (data->set.httpreq==HTTPREQ_GET)) {
+ /* we wanted to resume a download, although the server
+ doesn't seem to support this and we did this with a GET
+ (if it wasn't a GET we did a POST or PUT resume) */
+ failf (data, "HTTP server doesn't seem to support "
+ "byte ranges. Cannot resume.");
+ return CURLE_HTTP_RANGE_ERROR;
+ }
+ else if(data->set.timecondition && !conn->range) {
+ /* A time condition has been set AND no ranges have been
+ requested. This seems to be what chapter 13.3.4 of
+ RFC 2616 defines to be the correct action for a
+ HTTP/1.1 client */
+ if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
+ switch(data->set.timecondition) {
+ case TIMECOND_IFMODSINCE:
+ default:
+ if(k->timeofdoc < data->set.timevalue) {
+ infof(data,
+ "The requested document is not new enough\n");
+ return CURLE_OK;
+ }
+ break;
+ case TIMECOND_IFUNMODSINCE:
+ if(k->timeofdoc > data->set.timevalue) {
+ infof(data,
+ "The requested document is not old enough\n");
+ return CURLE_OK;
+ }
+ break;
+ } /* switch */
+ } /* two valid time strings */
+ } /* we have a time condition */
- if((-1 != conn->maxdownload) &&
- (bytecount + nread >= conn->maxdownload)) {
- nread = conn->maxdownload - bytecount;
- if((signed int)nread < 0 ) /* this should be unusual */
- nread = 0;
+ } /* this is HTTP */
+ } /* this is the first time we write a body part */
+ k->bodywrites++;
- keepon &= ~KEEP_READ; /* we're done reading */
- FD_ZERO(&rkeepfd);
- }
+ if(conn->bits.chunk) {
+ /*
+ * Bless me father for I have sinned. Here comes a chunked
+ * transfer flying and we need to decode this properly. While
+ * the name says read, this function both reads and writes away
+ * the data. The returned 'nread' holds the number of actual
+ * data it wrote to the client. */
+ CHUNKcode res =
+ Curl_httpchunk_read(conn, k->str, nread, &nread);
+
+ if(CHUNKE_OK < res) {
+          failf(data, "Received problem in the chunky parser");
+ return CURLE_READ_ERROR;
+ }
+ else if(CHUNKE_STOP == res) {
+ /* we're done reading chunks! */
+ k->keepon &= ~KEEP_READ; /* read no more */
+ FD_ZERO(&k->rkeepfd);
+
+ /* There are now possibly N number of bytes at the end of the
+ str buffer that weren't written to the client, but we don't
+ care about them right now. */
+ }
+ /* If it returned OK, we just keep going */
+ }
+
+ if((-1 != conn->maxdownload) &&
+ (k->bytecount + nread >= conn->maxdownload)) {
+ nread = conn->maxdownload - k->bytecount;
+ if((signed int)nread < 0 ) /* this should be unusual */
+ nread = 0;
+
+ k->keepon &= ~KEEP_READ; /* we're done reading */
+ FD_ZERO(&k->rkeepfd);
+ }
- bytecount += nread;
+ k->bytecount += nread;
- Curl_pgrsSetDownloadCounter(data, (double)bytecount);
+ Curl_pgrsSetDownloadCounter(data, (double)k->bytecount);
- if(!conn->bits.chunk && nread) {
- /* If this is chunky transfer, it was already written */
- urg = Curl_client_write(data, CLIENTWRITE_BODY, str, nread);
- if(urg)
- return urg;
- }
+ if(!conn->bits.chunk && nread) {
+ /* If this is chunky transfer, it was already written */
+ result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
+ nread);
+ if(result)
+ return result;
+ }
- } /* if (! header and data to read ) */
- } /* if( read from socket ) */
+ } /* if (! header and data to read ) */
+ } /* if( read from socket ) */
- if((keepon & KEEP_WRITE) && FD_ISSET(conn->writesockfd, &writefd)) {
- /* write */
+ if((k->keepon & KEEP_WRITE) &&
+ FD_ISSET(conn->writesockfd, &k->writefd)) {
+ /* write */
- int i, si;
- size_t bytes_written;
+ int i, si;
+ size_t bytes_written;
- nread = data->set.fread(buf, 1, conn->upload_bufsize, data->set.in);
+ if ((k->bytecount == 0) && (k->writebytecount == 0))
+ Curl_pgrsTime(data, TIMER_STARTTRANSFER);
- /* the signed int typecase of nread of for systems that has
- unsigned size_t */
- if ((signed int)nread<=0) {
- /* done */
- keepon &= ~KEEP_WRITE; /* we're done writing */
- FD_ZERO(&wkeepfd);
- break;
- }
- writebytecount += nread;
- Curl_pgrsSetUploadCounter(data, (double)writebytecount);
-
- /* convert LF to CRLF if so asked */
- if (data->set.crlf) {
- for(i = 0, si = 0; i < (int)nread; i++, si++) {
- if (buf[i] == 0x0a) {
- data->state.scratch[si++] = 0x0d;
- data->state.scratch[si] = 0x0a;
- }
- else {
- data->state.scratch[si] = buf[i];
- }
- }
- nread = si;
- buf = data->state.scratch; /* point to the new buffer */
- }
+ didwhat |= KEEP_WRITE;
- /* write to socket */
- urg = Curl_write(conn, conn->writesockfd, buf, nread,
- &bytes_written);
+ nread = data->set.fread(k->buf, 1, conn->upload_bufsize,
+ data->set.in);
- if(nread != (int)bytes_written) {
- failf(data, "Failed uploading data");
- return CURLE_WRITE_ERROR;
+      /* the signed int typecast of nread is for systems that have
+         unsigned size_t */
+ if ((signed int)nread<=0) {
+ /* done */
+ k->keepon &= ~KEEP_WRITE; /* we're done writing */
+ FD_ZERO(&k->wkeepfd);
+ break;
+ }
+ k->writebytecount += nread;
+ Curl_pgrsSetUploadCounter(data, (double)k->writebytecount);
+
+ /* convert LF to CRLF if so asked */
+ if (data->set.crlf) {
+ for(i = 0, si = 0; i < (int)nread; i++, si++) {
+ if (k->buf[i] == 0x0a) {
+ data->state.scratch[si++] = 0x0d;
+ data->state.scratch[si] = 0x0a;
+ }
+ else {
+ data->state.scratch[si] = k->buf[i];
}
- if(data->set.crlf)
- buf = data->state.buffer; /* put it back on the buffer */
-
}
+ nread = si;
+ k->buf = data->state.scratch; /* point to the new buffer */
+ }
- break;
+ /* write to socket */
+ result = Curl_write(conn, conn->writesockfd, k->buf, nread,
+ &bytes_written);
+ if(result)
+ return result;
+ else if(nread != (int)bytes_written) {
+ failf(data, "Failed uploading data");
+ return CURLE_WRITE_ERROR;
}
+ else if(data->set.crlf)
+ k->buf = data->state.buffer; /* put it back on the buffer */
- /* Update read/write counters */
- if(conn->bytecountp)
- *conn->bytecountp = bytecount; /* read count */
- if(conn->writebytecountp)
- *conn->writebytecountp = writebytecount; /* write count */
+ }
- now = Curl_tvnow();
- if(Curl_pgrsUpdate(conn))
- urg = CURLE_ABORTED_BY_CALLBACK;
- else
- urg = Curl_speedcheck (data, now);
- if (urg)
- return urg;
-
- if(data->progress.ulspeed > conn->upload_bufsize) {
- /* If we're transfering more data per second than fits in our buffer,
- we increase the buffer size to adjust to the current
- speed. However, we must not set it larger than BUFSIZE. We don't
- adjust it downwards again since we don't see any point in that!
- */
- conn->upload_bufsize=(long)min(data->progress.ulspeed, BUFSIZE);
- }
+ } while(0); /* just to break out from! */
- if (data->set.timeout &&
- ((Curl_tvdiff(now, start)/1000) >= data->set.timeout)) {
- failf (data, "Operation timed out with %d out of %d bytes received",
- bytecount, conn->size);
- return CURLE_OPERATION_TIMEOUTED;
- }
+ if(didwhat) {
+ /* Update read/write counters */
+ if(conn->bytecountp)
+ *conn->bytecountp = k->bytecount; /* read count */
+ if(conn->writebytecountp)
+ *conn->writebytecountp = k->writebytecount; /* write count */
+ }
+ else {
+ /* no read no write, this is a timeout? */
+ if (k->write_after_100_header) {
+ /* This should allow some time for the header to arrive, but only a
+         very short time as otherwise it'll waste too much time too
+         often. */
+ k->write_after_100_header = FALSE;
+ FD_SET (conn->writesockfd, &k->writefd); /* write socket */
+ k->keepon |= KEEP_WRITE;
+ k->wkeepfd = k->writefd;
+ }
+ }
+ k->now = Curl_tvnow();
+ if(Curl_pgrsUpdate(conn))
+ result = CURLE_ABORTED_BY_CALLBACK;
+ else
+ result = Curl_speedcheck (data, k->now);
+ if (result)
+ return result;
+
+ if(data->progress.ulspeed > conn->upload_bufsize) {
+    /* If we're transferring more data per second than fits in our buffer,
+ we increase the buffer size to adjust to the current
+ speed. However, we must not set it larger than BUFSIZE. We don't
+ adjust it downwards again since we don't see any point in that!
+ */
+ conn->upload_bufsize=(long)min(data->progress.ulspeed, BUFSIZE);
+ }
+
+ if (data->set.timeout &&
+ ((Curl_tvdiff(k->now, k->start)/1000) >= data->set.timeout)) {
+ failf (data, "Operation timed out with %d out of %d bytes received",
+ k->bytecount, conn->size);
+ return CURLE_OPERATION_TIMEOUTED;
+ }
+
+ if(!k->keepon) {
+ /*
+ * The transfer has been performed. Just make some general checks before
+ * returning.
+ */
+
+ if(!(data->set.no_body) && k->contentlength &&
+ (k->bytecount != k->contentlength) &&
+ !conn->newurl) {
+ failf(data, "transfer closed with %d bytes remaining to read",
+ k->contentlength-k->bytecount);
+ return CURLE_PARTIAL_FILE;
}
+ else if(conn->bits.chunk && conn->proto.http->chunk.datasize) {
+ failf(data, "transfer closed with at least %d bytes remaining",
+ conn->proto.http->chunk.datasize);
+ return CURLE_PARTIAL_FILE;
+ }
+ if(Curl_pgrsUpdate(conn))
+ return CURLE_ABORTED_BY_CALLBACK;
}
- /*
- * The tranfer has been performed. Just make some general checks before
- * returning.
- */
+ /* Now update the "done" boolean we return */
+ *done = !k->keepon;
+
+ return CURLE_OK;
+}
+
+CURLcode Curl_readwrite_init(struct connectdata *conn)
+{
+ struct SessionHandle *data = conn->data;
+ struct Curl_transfer_keeper *k = &conn->keep;
+
+ memset(k, 0, sizeof(struct Curl_transfer_keeper));
+
+ k->start = Curl_tvnow(); /* start time */
+ k->now = k->start; /* current time is now */
+ k->header = TRUE; /* assume header */
+ k->httpversion = -1; /* unknown at this point */
+ k->conn = (struct connectdata *)conn; /* store the connection */
+
+ data = conn->data; /* there's the root struct */
+ k->buf = data->state.buffer;
+ k->maxfd = (conn->sockfd>conn->writesockfd?
+ conn->sockfd:conn->writesockfd)+1;
+ k->hbufp = data->state.headerbuff;
- if(!(data->set.no_body) && contentlength &&
- (bytecount != contentlength)) {
- failf(data, "transfer closed with %d bytes remaining to read",
- contentlength-bytecount);
- return CURLE_PARTIAL_FILE;
+ Curl_pgrsTime(data, TIMER_PRETRANSFER);
+ Curl_speedinit(data);
+
+ if (!conn->getheader) {
+ k->header = FALSE;
+ if(conn->size > 0)
+ Curl_pgrsSetDownloadSize(data, conn->size);
}
- else if(conn->bits.chunk && conn->proto.http->chunk.datasize) {
- failf(data, "transfer closed with at least %d bytes remaining",
- conn->proto.http->chunk.datasize);
- return CURLE_PARTIAL_FILE;
+ /* we want header and/or body, if neither then don't do this! */
+ if(conn->getheader || !data->set.no_body) {
+
+ FD_ZERO (&k->readfd); /* clear it */
+ if(conn->sockfd != -1) {
+ FD_SET (conn->sockfd, &k->readfd); /* read socket */
+ k->keepon |= KEEP_READ;
+ }
+
+ FD_ZERO (&k->writefd); /* clear it */
+ if(conn->writesockfd != -1) {
+ if (data->set.expect100header)
+ /* wait with write until we either got 100-continue or a timeout */
+ k->write_after_100_header = TRUE;
+ else {
+ FD_SET (conn->writesockfd, &k->writefd); /* write socket */
+ k->keepon |= KEEP_WRITE;
+ }
+ }
+
+ /* get these in backup variables to be able to restore them on each lap in
+ the select() loop */
+ k->rkeepfd = k->readfd;
+ k->wkeepfd = k->writefd;
+
}
- if(Curl_pgrsUpdate(conn))
- return CURLE_ABORTED_BY_CALLBACK;
return CURLE_OK;
}
-CURLcode Curl_perform(struct SessionHandle *data)
+void Curl_single_fdset(struct connectdata *conn,
+ fd_set *read_fd_set,
+ fd_set *write_fd_set,
+ fd_set *exc_fd_set,
+ int *max_fd)
{
- CURLcode res;
- struct connectdata *conn=NULL;
- bool port=TRUE; /* allow data->set.use_port to set port to use */
- char *newurl = NULL; /* possibly a new URL to follow to! */
-#ifdef HAVE_SIGNAL
- /* storage for the previous bag^H^H^HSIGPIPE signal handler :-) */
- void (*prev_signal)(int sig);
+ *max_fd = -1; /* init */
+ if(conn->keep.keepon & KEEP_READ) {
+ FD_SET(conn->sockfd, read_fd_set);
+ *max_fd = conn->sockfd;
+ }
+ if(conn->keep.keepon & KEEP_WRITE) {
+ FD_SET(conn->writesockfd, write_fd_set);
+ if(conn->writesockfd > *max_fd)
+ *max_fd = conn->writesockfd;
+ }
+ /* we don't use exceptions, only touch that one to prevent compiler
+ warnings! */
+ *exc_fd_set = *exc_fd_set;
+}
+
+
+/*
+ * Transfer()
+ *
+ * This function is what performs the actual transfer. It is capable of
+ * doing both ways simultaneously.
+ * The transfer must already have been setup by a call to Curl_Transfer().
+ *
+ * Note that headers are created in a preallocated buffer of a default size.
+ * That buffer can be enlarged on demand, but it is never shrinken again.
+ *
+ * Parts of this function was once written by the friendly Mark Butler
+ * <butlerm@xmission.com>.
+ */
+
+static CURLcode
+Transfer(struct connectdata *conn)
+{
+ struct SessionHandle *data = conn->data;
+ CURLcode result;
+ struct Curl_transfer_keeper *k = &conn->keep;
+ bool done=FALSE;
+
+ Curl_readwrite_init(conn);
+
+ if((conn->sockfd == -1) && (conn->writesockfd == -1))
+ /* nothing to read, nothing to write, we're already OK! */
+ return CURLE_OK;
+
+ /* we want header and/or body, if neither then don't do this! */
+ if(!conn->getheader && data->set.no_body)
+ return CURLE_OK;
+
+ while (!done) {
+ struct timeval interval;
+ k->readfd = k->rkeepfd; /* set these every lap in the loop */
+ k->writefd = k->wkeepfd;
+ interval.tv_sec = 1;
+ interval.tv_usec = 0;
+
+ switch (select (k->maxfd, &k->readfd, &k->writefd, NULL,
+ &interval)) {
+ case -1: /* select() error, stop reading */
+#ifdef EINTR
+ /* The EINTR is not serious, and it seems you might get this more
+         often when using the lib in a multi-threaded environment! */
+ if(errno == EINTR)
+ ;
+ else
#endif
+ done = TRUE; /* no more read or write */
+ continue;
+ case 0: /* timeout */
+ result = Curl_readwrite(conn, &done);
+ break;
+ default: /* readable descriptors */
+ result = Curl_readwrite(conn, &done);
+ break;
+ }
+ if(result)
+ return result;
+
+ /* "done" signals to us if the transfer(s) are ready */
+ }
+ return CURLE_OK;
+}
+
+CURLcode Curl_pretransfer(struct SessionHandle *data)
+{
if(!data->change.url)
/* we can't do anything wihout URL */
return CURLE_URL_MALFORMAT;
@@ -922,16 +973,45 @@ CURLcode Curl_perform(struct SessionHandle *data)
data->state.this_is_a_follow = FALSE; /* reset this */
data->state.errorbuf = FALSE; /* no error has occurred */
+ /* Allow data->set.use_port to set which port to use. This needs to be
+ * disabled for example when we follow Location: headers to URLs using
+ * different ports! */
+ data->state.allow_port = TRUE;
+
#if defined(HAVE_SIGNAL) && defined(SIGPIPE)
/*************************************************************
* Tell signal handler to ignore SIGPIPE
*************************************************************/
- prev_signal = signal(SIGPIPE, SIG_IGN);
+ data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
#endif
Curl_initinfo(data); /* reset session-specific information "variables" */
Curl_pgrsStartNow(data);
+ return CURLE_OK;
+}
+
+CURLcode Curl_posttransfer(struct SessionHandle *data)
+{
+#if defined(HAVE_SIGNAL) && defined(SIGPIPE)
+ /* restore the signal handler for SIGPIPE before we get back */
+ signal(SIGPIPE, data->state.prev_signal);
+#endif
+
+ return CURLE_OK;
+}
+
+CURLcode Curl_perform(struct SessionHandle *data)
+{
+ CURLcode res;
+ CURLcode res2;
+ struct connectdata *conn=NULL;
+ char *newurl = NULL; /* possibly a new URL to follow to! */
+
+ res = Curl_pretransfer(data);
+ if(res)
+ return res;
+
/*
* It is important that there is NO 'return' from this function any any
* other place than falling down the bottom! This is because we have cleanup
@@ -941,30 +1021,9 @@ CURLcode Curl_perform(struct SessionHandle *data)
do {
Curl_pgrsTime(data, TIMER_STARTSINGLE);
- res = Curl_connect(data, &conn, port);
+ res = Curl_connect(data, &conn);
if(res == CURLE_OK) {
- res = Curl_do(conn);
-
- if((CURLE_WRITE_ERROR == res) && conn->bits.reuse) {
- /* This was a re-use of a connection and we got a write error in the
- * DO-phase. Then we DISCONNECT this connection and have another
- * attempt to CONNECT and then DO again! The retry cannot possibly
- * find another connection to re-use, since we only keep one possible
- * connection for each.
- */
-
- infof(data, "The re-used connection seems dead, get a new one\n");
-
- conn->bits.close = TRUE; /* enforce close of this connetion */
- res = Curl_done(conn); /* we are so done with this */
- if(CURLE_OK == res) {
- /* Now, redo the connect */
- res = Curl_connect(data, &conn, port);
- if(CURLE_OK == res)
- /* ... finally back to actually retry the DO phase */
- res = Curl_do(conn);
- }
- }
+ res = Curl_do(&conn);
if(res == CURLE_OK) {
CURLcode res2; /* just a local extra result container */
@@ -1010,9 +1069,6 @@ CURLcode Curl_perform(struct SessionHandle *data)
char prot[16]; /* URL protocol string storage */
char letter; /* used for a silly sscanf */
- port=TRUE; /* by default we use the user set port number even after
- a Location: */
-
if (data->set.maxredirs && (data->set.followlocation >= data->set.maxredirs)) {
failf(data,"Maximum (%d) redirects followed", data->set.maxredirs);
res=CURLE_TOO_MANY_REDIRECTS;
@@ -1101,10 +1157,9 @@ CURLcode Curl_perform(struct SessionHandle *data)
free(url_clone);
newurl = newest;
}
- else {
- /* This is an absolute URL, don't use the custom port number */
- port = FALSE;
- }
+ else
+ /* This is an absolute URL, don't allow the custom port number */
+ data->state.allow_port = FALSE;
if(data->change.url_alloc)
free(data->change.url);
@@ -1182,10 +1237,11 @@ CURLcode Curl_perform(struct SessionHandle *data)
if(newurl)
free(newurl);
-#if defined(HAVE_SIGNAL) && defined(SIGPIPE)
- /* restore the signal handler for SIGPIPE before we get back */
- signal(SIGPIPE, prev_signal);
-#endif
+  /* run post-transfer unconditionally, but don't clobber the return code if
+     we already have an error code recorded */
+ res2 = Curl_posttransfer(data);
+ if(!res && res2)
+ res = res2;
return res;
}