aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDaniel Stenberg <daniel@haxx.se>2002-01-03 15:01:22 +0000
committerDaniel Stenberg <daniel@haxx.se>2002-01-03 15:01:22 +0000
commit8b6314ccfbe48bba2cd560812dd1841425f3bd79 (patch)
tree17ca2bf182593acbbc583d224e0af674e9fd8964
parent6de7dc5879b3605a180dafa05f792f132eafdcaa (diff)
merged the multi-dev branch back into MAIN again
-rw-r--r--lib/Makefile.am3
-rw-r--r--lib/multi.c58
-rw-r--r--lib/multi.h3
-rw-r--r--lib/transfer.c1462
-rw-r--r--lib/transfer.h13
-rw-r--r--lib/url.c57
-rw-r--r--lib/url.h10
-rw-r--r--lib/urldata.h61
8 files changed, 925 insertions, 742 deletions
diff --git a/lib/Makefile.am b/lib/Makefile.am
index 63bf02cec..4af61410a 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -57,7 +57,8 @@ escape.h getpass.c netrc.c telnet.h \
getinfo.c getinfo.h transfer.c strequal.c strequal.h easy.c \
security.h security.c krb4.c krb4.h memdebug.c memdebug.h inet_ntoa_r.h \
http_chunks.c http_chunks.h strtok.c strtok.h connect.c connect.h \
-llist.c llist.h hash.c hash.h
+llist.c llist.h hash.c hash.h multi.c multi.h
+
noinst_HEADERS = setup.h transfer.h
diff --git a/lib/multi.c b/lib/multi.c
index ff9e91fea..b7ab209f5 100644
--- a/lib/multi.c
+++ b/lib/multi.c
@@ -22,10 +22,16 @@
*****************************************************************************/
#include "setup.h"
+#include <stdlib.h>
+#include <string.h>
#include <curl/curl.h>
#include "multi.h" /* will become <curl/multi.h> soon */
+#include "urldata.h"
+#include "transfer.h"
+#include "url.h"
+
struct Curl_message {
/* the 'CURLMsg' is the part that is visible to the external user */
struct CURLMsg extmsg;
@@ -48,7 +54,9 @@ struct Curl_one_easy {
struct Curl_one_easy *next;
struct Curl_one_easy *prev;
- CURL *easy_handle; /* this is the easy handle for this unit */
+ struct SessionHandle *easy_handle; /* the easy handle for this unit */
+ struct connectdata *easy_conn; /* the "unit's" connection */
+
CURLMstate state; /* the handle's state */
CURLcode result; /* previous result */
};
@@ -134,7 +142,7 @@ CURLMcode curl_multi_add_handle(CURLM *multi_handle,
/* increase the node-counter */
multi->num_easy++;
- return CURLM_OK;
+ return CURLM_CALL_MULTI_PERFORM;
}
CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
@@ -190,23 +198,30 @@ CURLMcode curl_multi_fdset(CURLM *multi_handle,
and then we must make sure that is done. */
struct Curl_multi *multi=(struct Curl_multi *)multi_handle;
struct Curl_one_easy *easy;
+ int this_max_fd=-1;
if(!GOOD_MULTI_HANDLE(multi))
return CURLM_BAD_HANDLE;
+ *max_fd = -1; /* so far none! */
+
easy=multi->easy.next;
while(easy) {
switch(easy->state) {
- case CURLM_STATE_INIT:
- case CURLM_STATE_CONNECT:
- case CURLM_STATE_DO:
- case CURLM_STATE_DONE:
- /* we want curl_multi_perform() to get called, but we don't have any
- file descriptors to set */
+ default:
break;
case CURLM_STATE_PERFORM:
/* This should have a set of file descriptors for us to set. */
/* after the transfer is done, go DONE */
+
+ Curl_single_fdset(easy->easy_conn,
+ read_fd_set, write_fd_set,
+ exc_fd_set, &this_max_fd);
+
+ /* remember the maximum file descriptor */
+ if(this_max_fd > *max_fd)
+ *max_fd = this_max_fd;
+
break;
}
easy = easy->next; /* check next handle */
@@ -222,6 +237,8 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
bool done;
CURLMcode result=CURLM_OK;
+ *running_handles = 0; /* bump this once for every living handle */
+
if(!GOOD_MULTI_HANDLE(multi))
return CURLM_BAD_HANDLE;
@@ -239,8 +256,9 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
}
break;
case CURLM_STATE_CONNECT:
- /* connect */
- easy->result = Curl_connect(easy->easy_handle);
+ /* Connect. We get a connection identifier filled in. */
+ easy->result = Curl_connect(easy->easy_handle, &easy->easy_conn);
+
/* after connect, go DO */
if(CURLE_OK == easy->result) {
easy->state = CURLM_STATE_DO;
@@ -249,15 +267,18 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
break;
case CURLM_STATE_DO:
/* Do the fetch or put request */
- easy->result = Curl_do(easy->easy_handle);
+ easy->result = Curl_do(&easy->easy_conn);
/* after do, go PERFORM */
if(CURLE_OK == easy->result) {
- easy->state = CURLM_STATE_PERFORM;
+ if(CURLE_OK == Curl_readwrite_init(easy->easy_conn)) {
+ easy->state = CURLM_STATE_PERFORM;
+ result = CURLM_CALL_MULTI_PERFORM;
+ }
}
break;
case CURLM_STATE_PERFORM:
/* read/write data if it is ready to do so */
- easy->result = Curl_readwrite(easy->easy_handle, &done);
+ easy->result = Curl_readwrite(easy->easy_conn, &done);
/* hm, when we follow redirects, we may need to go back to the CONNECT
state */
/* after the transfer is done, go DONE */
@@ -265,11 +286,12 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
/* call this even if the readwrite function returned error */
easy->result = Curl_posttransfer(easy->easy_handle);
easy->state = CURLM_STATE_DONE;
+ result = CURLM_CALL_MULTI_PERFORM;
}
break;
case CURLM_STATE_DONE:
/* post-transfer command */
- easy->result = Curl_done(easy->easy_handle);
+ easy->result = Curl_done(easy->easy_conn);
/* after we have DONE what we're supposed to do, go COMPLETED */
if(CURLE_OK == easy->result)
easy->state = CURLM_STATE_COMPLETED;
@@ -280,7 +302,10 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
/* This node should be delinked from the list now and we should post
an information message that we are complete. */
break;
+ default:
+ return CURLM_INTERNAL_ERROR;
}
+
if((CURLM_STATE_COMPLETED != easy->state) &&
(CURLE_OK != easy->result)) {
/*
@@ -289,10 +314,13 @@ CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles)
*/
easy->state = CURLM_STATE_COMPLETED;
}
+ else if(CURLM_STATE_COMPLETED != easy->state)
+ /* this one still lives! */
+ (*running_handles)++;
easy = easy->next; /* operate on next handle */
}
- return CURLM_OK;
+ return result;
}
CURLMcode curl_multi_cleanup(CURLM *multi_handle)
diff --git a/lib/multi.h b/lib/multi.h
index 1fdce73f0..40010d500 100644
--- a/lib/multi.h
+++ b/lib/multi.h
@@ -50,7 +50,7 @@
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
-#include <curl/types.h>
+#include <curl/curl.h>
typedef void CURLM;
@@ -60,6 +60,7 @@ typedef enum {
CURLM_BAD_HANDLE, /* the passed-in handle is not a valid CURLM handle */
CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */
CURLM_OUT_OF_MEMORY, /* if you ever get this, you're in deep sh*t */
+ CURLM_INTERNAL_ERROR, /* this is a libcurl bug */
CURLM_LAST
} CURLMcode;
diff --git a/lib/transfer.c b/lib/transfer.c
index 7c73c11b0..159151f59 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -106,6 +106,13 @@
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
+enum {
+ KEEP_NONE,
+ KEEP_READ,
+ KEEP_WRITE
+};
+
+
/*
* compareheader()
*
@@ -161,752 +168,796 @@ compareheader(char *headerline, /* line to check */
return FALSE; /* no match */
}
-/*
- * Transfer()
- *
- * This function is what performs the actual transfer. It is capable of
- * doing both ways simultaneously.
- * The transfer must already have been setup by a call to Curl_Transfer().
- *
- * Note that headers are created in a preallocated buffer of a default size.
- * That buffer can be enlarged on demand, but it is never shrinken again.
- *
- * Parts of this function was once written by the friendly Mark Butler
- * <butlerm@xmission.com>.
- */
-
-static CURLcode
-Transfer(struct connectdata *c_conn)
+CURLcode Curl_readwrite(struct connectdata *conn,
+ bool *done)
{
- ssize_t nread; /* number of bytes read */
- int bytecount = 0; /* total number of bytes read */
- int writebytecount = 0; /* number of bytes written */
- long contentlength=0; /* size of incoming data */
- struct timeval start = Curl_tvnow();
- struct timeval now = start; /* current time */
- bool header = TRUE; /* incoming data has HTTP header */
- int headerline = 0; /* counts header lines to better track the
- first one */
- char *hbufp; /* points at *end* of header line */
- int hbuflen = 0;
- char *str; /* within buf */
- char *str_start; /* within buf */
- char *end_ptr; /* within buf */
- char *p; /* within headerbuff */
- bool content_range = FALSE; /* set TRUE if Content-Range: was found */
- int offset = 0; /* possible resume offset read from the
- Content-Range: header */
- int httpcode = 0; /* error code from the 'HTTP/1.? XXX' line */
- int httpversion = -1; /* the HTTP version*10 */
- bool write_after_100_header = FALSE; /* should we enable the write after
- we received a 100-continue/timeout
- or directly */
-
- /* for the low speed checks: */
- CURLcode urg;
- time_t timeofdoc=0;
- long bodywrites=0;
- int writetype;
-
- /* the highest fd we use + 1 */
- struct SessionHandle *data;
- struct connectdata *conn = (struct connectdata *)c_conn;
- char *buf;
- int maxfd;
+ struct Curl_transfer_keeper *k = &conn->keep;
+ struct SessionHandle *data = conn->data;
+ CURLcode result;
+ ssize_t nread; /* number of bytes read */
+ int didwhat=0;
- data = conn->data; /* there's the root struct */
- buf = data->state.buffer;
- maxfd = (conn->sockfd>conn->writesockfd?conn->sockfd:conn->writesockfd)+1;
-
- hbufp = data->state.headerbuff;
+ do {
+ if((k->keepon & KEEP_READ) &&
+ FD_ISSET(conn->sockfd, &k->readfd)) {
- now = Curl_tvnow();
- start = now;
+ if ((k->bytecount == 0) && (k->writebytecount == 0))
+ Curl_pgrsTime(data, TIMER_STARTTRANSFER);
-#define KEEP_READ 1
-#define KEEP_WRITE 2
+ didwhat |= KEEP_READ;
- Curl_pgrsTime(data, TIMER_PRETRANSFER);
- Curl_speedinit(data);
-
- if((conn->sockfd == -1) &&
- (conn->writesockfd == -1)) {
- /* nothing to read, nothing to write, we're already OK! */
- return CURLE_OK;
- }
+ /* read! */
+ result = Curl_read(conn, conn->sockfd, k->buf,
+ BUFSIZE -1, &nread);
- if (!conn->getheader) {
- header = FALSE;
- if(conn->size > 0)
- Curl_pgrsSetDownloadSize(data, conn->size);
- }
- /* we want header and/or body, if neither then don't do this! */
- if(conn->getheader ||
- !data->set.no_body) {
- fd_set readfd;
- fd_set writefd;
- fd_set rkeepfd;
- fd_set wkeepfd;
- struct timeval interval;
- int keepon=0;
-
- /* timeout every X second
- - makes a better progress meter (i.e even when no data is read, the
- meter can be updated and reflect reality)
- - allows removal of the alarm() crap
- - variable timeout is easier
- */
+ if(result)
+ return result;
- FD_ZERO (&readfd); /* clear it */
- if(conn->sockfd != -1) {
- FD_SET (conn->sockfd, &readfd); /* read socket */
- keepon |= KEEP_READ;
- }
+ /* NULL terminate, allowing string ops to be used */
+ if (0 < (signed int) nread)
+ k->buf[nread] = 0;
- FD_ZERO (&writefd); /* clear it */
- if(conn->writesockfd != -1) {
- if (data->set.expect100header)
- /* wait with write until we either got 100-continue or a timeout */
- write_after_100_header = TRUE;
- else {
- FD_SET (conn->writesockfd, &writefd); /* write socket */
- keepon |= KEEP_WRITE;
+ /* if we receive 0 or less here, the server closed the connection and
+ we bail out from this! */
+ else if (0 >= (signed int) nread) {
+ k->keepon &= ~KEEP_READ;
+ FD_ZERO(&k->rkeepfd);
+ break;
}
- }
- /* get these in backup variables to be able to restore them on each lap in
- the select() loop */
- rkeepfd = readfd;
- wkeepfd = writefd;
+ /* Default buffer to use when we write the buffer, it may be changed
+ in the flow below before the actual storing is done. */
+ k->str = k->buf;
- while (keepon) {
- readfd = rkeepfd; /* set those every lap in the loop */
- writefd = wkeepfd;
- interval.tv_sec = 1;
- interval.tv_usec = 0;
+ /* Since this is a two-state thing, we check if we are parsing
+ headers at the moment or not. */
+ if (k->header) {
+ /* we are in parse-the-header-mode */
- switch (select (maxfd, &readfd, &writefd, NULL, &interval)) {
- case -1: /* select() error, stop reading */
-#ifdef EINTR
- /* The EINTR is not serious, and it seems you might get this more
- ofen when using the lib in a multi-threaded environment! */
- if(errno == EINTR)
- ;
- else
-#endif
- keepon = 0; /* no more read or write */
- continue;
- case 0: /* timeout */
- if (write_after_100_header) {
- write_after_100_header = FALSE;
- FD_SET (conn->writesockfd, &writefd); /* write socket */
- keepon |= KEEP_WRITE;
- wkeepfd = writefd;
- }
- break;
- default:
- if ((bytecount == 0) && (writebytecount == 0))
- Curl_pgrsTime(data, TIMER_STARTTRANSFER);
- if((keepon & KEEP_READ) && FD_ISSET(conn->sockfd, &readfd)) {
- /* read! */
- urg = Curl_read(conn, conn->sockfd, buf, BUFSIZE -1, &nread);
-
- /* NULL terminate, allowing string ops to be used */
- if (0 < (signed int) nread)
- buf[nread] = 0;
-
- /* if we receive 0 or less here, the server closed the connection and
- we bail out from this! */
- else if (0 >= (signed int) nread) {
- keepon &= ~KEEP_READ;
- FD_ZERO(&rkeepfd);
- break;
+ /* header line within buffer loop */
+ do {
+ int hbufp_index;
+
+ /* str_start is start of line within buf */
+ k->str_start = k->str;
+
+ k->end_ptr = strchr (k->str_start, '\n');
+
+ if (!k->end_ptr) {
+ /* no more complete header lines within buffer */
+ /* copy what is remaining into headerbuff */
+ int str_length = (int)strlen(k->str);
+
+ /*
+ * We enlarge the header buffer if it seems to be too
+ * smallish
+ */
+ if (k->hbuflen + (int)str_length >=
+ data->state.headersize) {
+ char *newbuff;
+ long newsize=MAX((k->hbuflen+str_length)*3/2,
+ data->state.headersize*2);
+ hbufp_index = k->hbufp - data->state.headerbuff;
+ newbuff = (char *)realloc(data->state.headerbuff, newsize);
+ if(!newbuff) {
+ failf (data, "Failed to alloc memory for big header!");
+ return CURLE_READ_ERROR;
+ }
+ data->state.headersize=newsize;
+ data->state.headerbuff = newbuff;
+ k->hbufp = data->state.headerbuff + hbufp_index;
+ }
+ strcpy (k->hbufp, k->str);
+ k->hbufp += strlen (k->str);
+ k->hbuflen += strlen (k->str);
+ break; /* read more and try again */
}
- str = buf; /* Default buffer to use when we write the
- buffer, it may be changed in the flow below
- before the actual storing is done. */
+ k->str = k->end_ptr + 1; /* move past new line */
- /* Since this is a two-state thing, we check if we are parsing
- headers at the moment or not. */
-
- if (header) {
- /* we are in parse-the-header-mode */
+ /*
+ * We're about to copy a chunk of data to the end of the
+ * already received header. We make sure that the full string
+ * fit in the allocated header buffer, or else we enlarge
+ * it.
+ */
+ if (k->hbuflen + (k->str - k->str_start) >=
+ data->state.headersize) {
+ char *newbuff;
+ long newsize=MAX((k->hbuflen+
+ (k->str-k->str_start))*3/2,
+ data->state.headersize*2);
+ hbufp_index = k->hbufp - data->state.headerbuff;
+ newbuff = (char *)realloc(data->state.headerbuff, newsize);
+ if(!newbuff) {
+ failf (data, "Failed to alloc memory for big header!");
+ return CURLE_READ_ERROR;
+ }
+ data->state.headersize= newsize;
+ data->state.headerbuff = newbuff;
+ k->hbufp = data->state.headerbuff + hbufp_index;
+ }
- /* header line within buffer loop */
- do {
- int hbufp_index;
+ /* copy to end of line */
+ strncpy (k->hbufp, k->str_start, k->str - k->str_start);
+ k->hbufp += k->str - k->str_start;
+ k->hbuflen += k->str - k->str_start;
+ *k->hbufp = 0;
- str_start = str; /* str_start is start of line within buf */
+ k->p = data->state.headerbuff;
- end_ptr = strchr (str_start, '\n');
-
- if (!end_ptr) {
- /* no more complete header lines within buffer */
- /* copy what is remaining into headerbuff */
- int str_length = (int)strlen(str);
-
- /*
- * We enlarge the header buffer if it seems to be too
- * smallish
- */
- if (hbuflen + (int)str_length >= data->state.headersize) {
- char *newbuff;
- long newsize=MAX((hbuflen+str_length)*3/2,
- data->state.headersize*2);
- hbufp_index = hbufp - data->state.headerbuff;
- newbuff = (char *)realloc(data->state.headerbuff, newsize);
- if(!newbuff) {
- failf (data, "Failed to alloc memory for big header!");
- return CURLE_READ_ERROR;
- }
- data->state.headersize=newsize;
- data->state.headerbuff = newbuff;
- hbufp = data->state.headerbuff + hbufp_index;
- }
- strcpy (hbufp, str);
- hbufp += strlen (str);
- hbuflen += strlen (str);
- break; /* read more and try again */
- }
+ /****
+ * We now have a FULL header line that p points to
+ *****/
- str = end_ptr + 1; /* move just past new line */
+ if (('\n' == *k->p) || ('\r' == *k->p)) {
+ /* Zero-length header line means end of headers! */
+ if ('\r' == *k->p)
+ k->p++; /* pass the \r byte */
+ if ('\n' == *k->p)
+ k->p++; /* pass the \n byte */
+
+ if(100 == k->httpcode) {
/*
- * We're about to copy a chunk of data to the end of the
- * already received header. We make sure that the full string
- * fit in the allocated header buffer, or else we enlarge
- * it.
+ * we have made a HTTP PUT or POST and this is 1.1-lingo
+ * that tells us that the server is OK with this and ready
+ * to receive our stuff.
+ * However, we'll get more headers now so we must get
+ * back into the header-parsing state!
*/
- if (hbuflen + (str - str_start) >= data->state.headersize) {
- char *newbuff;
- long newsize=MAX((hbuflen+(str-str_start))*3/2,
- data->state.headersize*2);
- hbufp_index = hbufp - data->state.headerbuff;
- newbuff = (char *)realloc(data->state.headerbuff, newsize);
- if(!newbuff) {
- failf (data, "Failed to alloc memory for big header!");
- return CURLE_READ_ERROR;
- }
- data->state.headersize= newsize;
- data->state.headerbuff = newbuff;
- hbufp = data->state.headerbuff + hbufp_index;
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
+ /* if we did wait for this do enable write now! */
+ if (k->write_after_100_header) {
+ k->write_after_100_header = FALSE;
+ FD_SET (conn->writesockfd, &k->writefd); /* write */
+ k->keepon |= KEEP_WRITE;
+ k->wkeepfd = k->writefd;
}
+ }
+ else
+ k->header = FALSE; /* no more header to parse! */
- /* copy to end of line */
- strncpy (hbufp, str_start, str - str_start);
- hbufp += str - str_start;
- hbuflen += str - str_start;
- *hbufp = 0;
-
- p = data->state.headerbuff;
-
- /****
- * We now have a FULL header line that p points to
- *****/
-
- if (('\n' == *p) || ('\r' == *p)) {
- /* Zero-length header line means end of headers! */
-
- if ('\r' == *p)
- p++; /* pass the \r byte */
- if ('\n' == *p)
- p++; /* pass the \n byte */
-
- if(100 == httpcode) {
- /*
- * we have made a HTTP PUT or POST and this is 1.1-lingo
- * that tells us that the server is OK with this and ready
- * to receive our stuff.
- * However, we'll get more headers now so we must get
- * back into the header-parsing state!
- */
- header = TRUE;
- headerline = 0; /* we restart the header line counter */
- /* if we did wait for this do enable write now! */
- if (write_after_100_header) {
- write_after_100_header = FALSE;
- FD_SET (conn->writesockfd, &writefd); /* write socket */
- keepon |= KEEP_WRITE;
- wkeepfd = writefd;
- }
- }
- else
- header = FALSE; /* no more header to parse! */
-
- if (417 == httpcode) {
- /*
- * we got: "417 Expectation Failed" this means:
- * we have made a HTTP call and our Expect Header
- * seems to cause a problem => abort the write operations
- * (or prevent them from starting
- */
- write_after_100_header = FALSE;
- keepon &= ~KEEP_WRITE;
- FD_ZERO(&wkeepfd);
- }
-
- /* now, only output this if the header AND body are requested:
- */
- writetype = CLIENTWRITE_HEADER;
- if (data->set.http_include_header)
- writetype |= CLIENTWRITE_BODY;
-
- urg = Curl_client_write(data, writetype,
- data->state.headerbuff,
- p - data->state.headerbuff);
- if(urg)
- return urg;
-
- data->info.header_size += p - data->state.headerbuff;
- conn->headerbytecount += p - data->state.headerbuff;
-
- if(!header) {
- /*
- * really end-of-headers.
- *
- * If we requested a "no body", this is a good time to get
- * out and return home.
- */
- if(data->set.no_body)
- return CURLE_OK;
+ if (417 == k->httpcode) {
+ /*
+ * we got: "417 Expectation Failed" this means:
+ * we have made a HTTP call and our Expect Header
+ * seems to cause a problem => abort the write operations
+ * (or prevent them from starting
+ */
+ k->write_after_100_header = FALSE;
+ k->keepon &= ~KEEP_WRITE;
+ FD_ZERO(&k->wkeepfd);
+ }
- if(!conn->bits.close) {
- /* If this is not the last request before a close, we must
- set the maximum download size to the size of the
- expected document or else, we won't know when to stop
- reading! */
- if(-1 != conn->size)
- conn->maxdownload = conn->size;
-
- /* If max download size is *zero* (nothing) we already
- have nothing and can safely return ok now! */
- if(0 == conn->maxdownload)
- return CURLE_OK;
-
- /* What to do if the size is *not* known? */
- }
- break; /* exit header line loop */
- }
-
- /* We continue reading headers, so reset the line-based
- header parsing variables hbufp && hbuflen */
- hbufp = data->state.headerbuff;
- hbuflen = 0;
- continue;
- }
+ /* now, only output this if the header AND body are requested:
+ */
+ k->writetype = CLIENTWRITE_HEADER;
+ if (data->set.http_include_header)
+ k->writetype |= CLIENTWRITE_BODY;
+
+ result = Curl_client_write(data, k->writetype,
+ data->state.headerbuff,
+ k->p - data->state.headerbuff);
+ if(result)
+ return result;
+
+ data->info.header_size += k->p - data->state.headerbuff;
+ conn->headerbytecount += k->p - data->state.headerbuff;
+ if(!k->header) {
/*
- * Checks for special headers coming up.
+ * really end-of-headers.
+ *
+ * If we requested a "no body", this is a good time to get
+ * out and return home.
*/
+ if(data->set.no_body)
+ return CURLE_OK;
+
+ if(!conn->bits.close) {
+ /* If this is not the last request before a close, we must
+ set the maximum download size to the size of the
+ expected document or else, we won't know when to stop
+ reading! */
+ if(-1 != conn->size)
+ conn->maxdownload = conn->size;
+
+ /* If max download size is *zero* (nothing) we already
+ have nothing and can safely return ok now! */
+ if(0 == conn->maxdownload)
+ return CURLE_OK;
+
+ /* What to do if the size is *not* known? */
+ }
+ break; /* exit header line loop */
+ }
+
+ /* We continue reading headers, so reset the line-based
+ header parsing variables hbufp && hbuflen */
+ k->hbufp = data->state.headerbuff;
+ k->hbuflen = 0;
+ continue;
+ }
+
+ /*
+ * Checks for special headers coming up.
+ */
- if (!headerline++) {
- /* This is the first header, it MUST be the error code line
- or else we consiser this to be the body right away! */
- int httpversion_major;
- int nc=sscanf (p, " HTTP/%d.%d %3d",
- &httpversion_major ,&httpversion, &httpcode);
- if (nc==3) {
- httpversion+=10*httpversion_major;
- }
- else {
- /* this is the real world, not a Nirvana
- NCSA 1.5.x returns this crap when asked for HTTP/1.1
- */
- nc=sscanf (p, " HTTP %3d", &httpcode);
- httpversion = 10;
- }
-
- if (nc) {
- data->info.httpcode = httpcode;
- data->info.httpversion = httpversion;
-
- /* 404 -> URL not found! */
- if (data->set.http_fail_on_error &&
- (httpcode >= 400)) {
- /* If we have been told to fail hard on HTTP-errors,
- here is the check for that: */
- /* serious error, go home! */
- failf (data, "The requested file was not found");
- return CURLE_HTTP_NOT_FOUND;
- }
+ if (!k->headerline++) {
+ /* This is the first header, it MUST be the error code line
+ or else we consider this to be the body right away! */
+ int httpversion_major;
+ int nc=sscanf (k->p, " HTTP/%d.%d %3d",
+ &httpversion_major,
+ &k->httpversion,
+ &k->httpcode);
+ if (nc==3) {
+ k->httpversion += 10 * httpversion_major;
+ }
+ else {
+ /* this is the real world, not a Nirvana
+ NCSA 1.5.x returns this crap when asked for HTTP/1.1
+ */
+ nc=sscanf (k->p, " HTTP %3d", &k->httpcode);
+ k->httpversion = 10;
+ }
- if(httpversion == 10)
- /* Default action for HTTP/1.0 must be to close, unless
- we get one of those fancy headers that tell us the
- server keeps it open for us! */
- conn->bits.close = TRUE;
-
- if (httpcode == 304)
- /* (quote from RFC2616, section 10.3.5):
- * The 304 response MUST NOT contain a
- * message-body, and thus is always
- * terminated by the first empty line
- * after the header fields.
- */
- conn->size=0;
- }
- else {
- header = FALSE; /* this is not a header line */
- break;
- }
- }
- /* check for Content-Length: header lines to get size */
- if (strnequal("Content-Length:", p, 15) &&
- sscanf (p+15, " %ld", &contentlength)) {
- conn->size = contentlength;
- Curl_pgrsSetDownloadSize(data, contentlength);
+ if (nc) {
+ data->info.httpcode = k->httpcode;
+ data->info.httpversion = k->httpversion;
+
+ /* 404 -> URL not found! */
+ if (data->set.http_fail_on_error &&
+ (k->httpcode >= 400)) {
+ /* If we have been told to fail hard on HTTP-errors,
+ here is the check for that: */
+ /* serious error, go home! */
+ failf (data, "The requested file was not found");
+ return CURLE_HTTP_NOT_FOUND;
}
- else if((httpversion == 10) &&
- conn->bits.httpproxy &&
- compareheader(p, "Proxy-Connection:", "keep-alive")) {
- /*
- * When a HTTP/1.0 reply comes when using a proxy, the
- * 'Proxy-Connection: keep-alive' line tells us the
- * connection will be kept alive for our pleasure.
- * Default action for 1.0 is to close.
- */
- conn->bits.close = FALSE; /* don't close when done */
- infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
- }
- else if((httpversion == 10) &&
- compareheader(p, "Connection:", "keep-alive")) {
- /*
- * A HTTP/1.0 reply with the 'Connection: keep-alive' line
- * tells us the connection will be kept alive for our
- * pleasure. Default action for 1.0 is to close.
- *
- * [RFC2068, section 19.7.1] */
- conn->bits.close = FALSE; /* don't close when done */
- infof(data, "HTTP/1.0 connection set to keep alive!\n");
- }
- else if (compareheader(p, "Connection:", "close")) {
- /*
- * [RFC 2616, section 8.1.2.1]
- * "Connection: close" is HTTP/1.1 language and means that
- * the connection will close when this request has been
- * served.
+
+ if(k->httpversion == 10)
+ /* Default action for HTTP/1.0 must be to close, unless
+ we get one of those fancy headers that tell us the
+ server keeps it open for us! */
+ conn->bits.close = TRUE;
+
+ if (k->httpcode == 304)
+ /* (quote from RFC2616, section 10.3.5):
+ * The 304 response MUST NOT contain a
+ * message-body, and thus is always
+ * terminated by the first empty line
+ * after the header fields.
*/
- conn->bits.close = TRUE; /* close when done */
- }
- else if (compareheader(p, "Transfer-Encoding:", "chunked")) {
- /*
- * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
- * means that the server will send a series of "chunks". Each
- * chunk starts with line with info (including size of the
- * coming block) (terminated with CRLF), then a block of data
- * with the previously mentioned size. There can be any amount
- * of chunks, and a chunk-data set to zero signals the
- * end-of-chunks. */
- conn->bits.chunk = TRUE; /* chunks coming our way */
-
- /* init our chunky engine */
- Curl_httpchunk_init(conn);
- }
- else if (strnequal("Content-Range:", p, 14)) {
- if (sscanf (p+14, " bytes %d-", &offset) ||
- sscanf (p+14, " bytes: %d-", &offset)) {
- /* This second format was added August 1st 2000 by Igor
- Khristophorov since Sun's webserver JavaWebServer/1.1.1
- obviously sends the header this way! :-( */
- if (conn->resume_from == offset) {
- /* we asked for a resume and we got it */
- content_range = TRUE;
- }
- }
- }
- else if(data->cookies &&
- strnequal("Set-Cookie:", p, 11)) {
- Curl_cookie_add(data->cookies, TRUE, &p[12], conn->name);
- }
- else if(strnequal("Last-Modified:", p,
- strlen("Last-Modified:")) &&
- (data->set.timecondition || data->set.get_filetime) ) {
- time_t secs=time(NULL);
- timeofdoc = curl_getdate(p+strlen("Last-Modified:"), &secs);
- if(data->set.get_filetime>=0)
- data->info.filetime = timeofdoc;
- }
- else if ((httpcode >= 300 && httpcode < 400) &&
- (data->set.http_follow_location) &&
- strnequal("Location:", p, 9)) {
- /* this is the URL that the server advices us to get instead */
- char *ptr;
- char *start=p;
- char backup;
-
- start += 9; /* pass "Location:" */
-
- /* Skip spaces and tabs. We do this to support multiple
- white spaces after the "Location:" keyword. */
- while(*start && isspace((int)*start ))
- start++;
- ptr = start; /* start scanning here */
-
- /* scan through the string to find the end */
- while(*ptr && !isspace((int)*ptr))
- ptr++;
- backup = *ptr; /* store the ending letter */
- *ptr = '\0'; /* zero terminate */
- conn->newurl = strdup(start); /* clone string */
- *ptr = backup; /* restore ending letter */
+ conn->size=0;
+ }
+ else {
+ k->header = FALSE; /* this is not a header line */
+ break;
+ }
+ }
+ /* check for Content-Length: header lines to get size */
+ if (strnequal("Content-Length:", k->p, 15) &&
+ sscanf (k->p+15, " %ld", &k->contentlength)) {
+ conn->size = k->contentlength;
+ Curl_pgrsSetDownloadSize(data, k->contentlength);
+ }
+ else if((k->httpversion == 10) &&
+ conn->bits.httpproxy &&
+ compareheader(k->p, "Proxy-Connection:", "keep-alive")) {
+ /*
+ * When a HTTP/1.0 reply comes when using a proxy, the
+ * 'Proxy-Connection: keep-alive' line tells us the
+ * connection will be kept alive for our pleasure.
+ * Default action for 1.0 is to close.
+ */
+ conn->bits.close = FALSE; /* don't close when done */
+ infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
+ }
+ else if((k->httpversion == 10) &&
+ compareheader(k->p, "Connection:", "keep-alive")) {
+ /*
+ * A HTTP/1.0 reply with the 'Connection: keep-alive' line
+ * tells us the connection will be kept alive for our
+ * pleasure. Default action for 1.0 is to close.
+ *
+ * [RFC2068, section 19.7.1] */
+ conn->bits.close = FALSE; /* don't close when done */
+ infof(data, "HTTP/1.0 connection set to keep alive!\n");
+ }
+ else if (compareheader(k->p, "Connection:", "close")) {
+ /*
+ * [RFC 2616, section 8.1.2.1]
+ * "Connection: close" is HTTP/1.1 language and means that
+ * the connection will close when this request has been
+ * served.
+ */
+ conn->bits.close = TRUE; /* close when done */
+ }
+ else if (compareheader(k->p, "Transfer-Encoding:", "chunked")) {
+ /*
+ * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
+ * means that the server will send a series of "chunks". Each
+ * chunk starts with line with info (including size of the
+ * coming block) (terminated with CRLF), then a block of data
+ * with the previously mentioned size. There can be any amount
+ * of chunks, and a chunk-data set to zero signals the
+ * end-of-chunks. */
+ conn->bits.chunk = TRUE; /* chunks coming our way */
+
+ /* init our chunky engine */
+ Curl_httpchunk_init(conn);
+ }
+ else if (strnequal("Content-Range:", k->p, 14)) {
+ if (sscanf (k->p+14, " bytes %d-", &k->offset) ||
+ sscanf (k->p+14, " bytes: %d-", &k->offset)) {
+ /* This second format was added August 1st 2000 by Igor
+ Khristophorov since Sun's webserver JavaWebServer/1.1.1
+ obviously sends the header this way! :-( */
+ if (conn->resume_from == k->offset) {
+ /* we asked for a resume and we got it */
+ k->content_range = TRUE;
}
+ }
+ }
+ else if(data->cookies &&
+ strnequal("Set-Cookie:", k->p, 11)) {
+ Curl_cookie_add(data->cookies, TRUE, k->p+12, conn->name);
+ }
+ else if(strnequal("Last-Modified:", k->p,
+ strlen("Last-Modified:")) &&
+ (data->set.timecondition || data->set.get_filetime) ) {
+ time_t secs=time(NULL);
+ k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
+ &secs);
+ if(data->set.get_filetime>=0)
+ data->info.filetime = k->timeofdoc;
+ }
+ else if ((k->httpcode >= 300 && k->httpcode < 400) &&
+ (data->set.http_follow_location) &&
+ strnequal("Location:", k->p, 9)) {
+ /* this is the URL that the server advises us to get instead */
+ char *ptr;
+ char *start=k->p;
+ char backup;
+
+ start += 9; /* pass "Location:" */
+
+ /* Skip spaces and tabs. We do this to support multiple
+ white spaces after the "Location:" keyword. */
+ while(*start && isspace((int)*start ))
+ start++;
+ ptr = start; /* start scanning here */
+
+ /* scan through the string to find the end */
+ while(*ptr && !isspace((int)*ptr))
+ ptr++;
+ backup = *ptr; /* store the ending letter */
+ *ptr = '\0'; /* zero terminate */
+ conn->newurl = strdup(start); /* clone string */
+ *ptr = backup; /* restore ending letter */
+ }
- /*
- * End of header-checks. Write them to the client.
- */
+ /*
+ * End of header-checks. Write them to the client.
+ */
- writetype = CLIENTWRITE_HEADER;
- if (data->set.http_include_header)
- writetype |= CLIENTWRITE_BODY;
+ k->writetype = CLIENTWRITE_HEADER;
+ if (data->set.http_include_header)
+ k->writetype |= CLIENTWRITE_BODY;
- urg = Curl_client_write(data, writetype, p, hbuflen);
- if(urg)
- return urg;
+ result = Curl_client_write(data, k->writetype, k->p,
+ k->hbuflen);
+ if(result)
+ return result;
- data->info.header_size += hbuflen;
+ data->info.header_size += k->hbuflen;
- /* reset hbufp pointer && hbuflen */
- hbufp = data->state.headerbuff;
- hbuflen = 0;
- }
- while (*str); /* header line within buffer */
+ /* reset hbufp pointer && hbuflen */
+ k->hbufp = data->state.headerbuff;
+ k->hbuflen = 0;
+ }
+ while (*k->str); /* header line within buffer */
- /* We might have reached the end of the header part here, but
- there might be a non-header part left in the end of the read
- buffer. */
+ /* We might have reached the end of the header part here, but
+ there might be a non-header part left in the end of the read
+ buffer. */
- if (!header) {
- /* the next token and forward is not part of
- the header! */
+ if (!k->header) {
+ /* the next token and forward is not part of
+ the header! */
- /* we subtract the remaining header size from the buffer */
- nread -= (str - buf);
- }
+ /* we subtract the remaining header size from the buffer */
+ nread -= (k->str - k->buf);
+ }
- } /* end if header mode */
+ } /* end if header mode */
- /* This is not an 'else if' since it may be a rest from the header
- parsing, where the beginning of the buffer is headers and the end
- is non-headers. */
- if (str && !header && ((signed int)nread > 0)) {
+ /* This is not an 'else if' since it may be a rest from the header
+ parsing, where the beginning of the buffer is headers and the end
+ is non-headers. */
+ if (k->str && !k->header && ((signed int)nread > 0)) {
- if(0 == bodywrites) {
- /* These checks are only made the first time we are about to
- write a piece of the body */
- if(conn->protocol&PROT_HTTP) {
- /* HTTP-only checks */
- if (conn->newurl) {
- /* abort after the headers if "follow Location" is set */
- infof (data, "Follow to new URL: %s\n", conn->newurl);
- return CURLE_OK;
- }
- else if (conn->resume_from &&
- !content_range &&
- (data->set.httpreq==HTTPREQ_GET)) {
- /* we wanted to resume a download, although the server
- doesn't seem to support this and we did this with a GET
- (if it wasn't a GET we did a POST or PUT resume) */
- failf (data, "HTTP server doesn't seem to support "
- "byte ranges. Cannot resume.");
- return CURLE_HTTP_RANGE_ERROR;
- }
- else if(data->set.timecondition && !conn->range) {
- /* A time condition has been set AND no ranges have been
- requested. This seems to be what chapter 13.3.4 of
- RFC 2616 defines to be the correct action for a
- HTTP/1.1 client */
- if((timeofdoc > 0) && (data->set.timevalue > 0)) {
- switch(data->set.timecondition) {
- case TIMECOND_IFMODSINCE:
- default:
- if(timeofdoc < data->set.timevalue) {
- infof(data,
- "The requested document is not new enough\n");
- return CURLE_OK;
- }
- break;
- case TIMECOND_IFUNMODSINCE:
- if(timeofdoc > data->set.timevalue) {
- infof(data,
- "The requested document is not old enough\n");
- return CURLE_OK;
- }
- break;
- } /* switch */
- } /* two valid time strings */
- } /* we have a time condition */
-
- } /* this is HTTP */
- } /* this is the first time we write a body part */
- bodywrites++;
-
- if(conn->bits.chunk) {
- /*
- * Bless me father for I have sinned. Here comes a chunked
- * transfer flying and we need to decode this properly. While
- * the name says read, this function both reads and writes away
- * the data. The returned 'nread' holds the number of actual
- * data it wrote to the client. */
- CHUNKcode res =
- Curl_httpchunk_read(conn, str, nread, &nread);
-
- if(CHUNKE_OK < res) {
- failf(data, "Receeived problem in the chunky parser");
- return CURLE_READ_ERROR;
- }
- else if(CHUNKE_STOP == res) {
- /* we're done reading chunks! */
- keepon &= ~KEEP_READ; /* read no more */
- FD_ZERO(&rkeepfd);
-
- /* There are now possibly N number of bytes at the end of the
- str buffer that weren't written to the client, but we don't
- care about them right now. */
- }
- /* If it returned OK, we just keep going */
+ if(0 == k->bodywrites) {
+ /* These checks are only made the first time we are about to
+ write a piece of the body */
+ if(conn->protocol&PROT_HTTP) {
+ /* HTTP-only checks */
+ if (conn->newurl) {
+ /* abort after the headers if "follow Location" is set */
+ infof (data, "Follow to new URL: %s\n", conn->newurl);
+ return CURLE_OK;
}
+ else if (conn->resume_from &&
+ !k->content_range &&
+ (data->set.httpreq==HTTPREQ_GET)) {
+ /* we wanted to resume a download, although the server
+ doesn't seem to support this and we did this with a GET
+ (if it wasn't a GET we did a POST or PUT resume) */
+ failf (data, "HTTP server doesn't seem to support "
+ "byte ranges. Cannot resume.");
+ return CURLE_HTTP_RANGE_ERROR;
+ }
+ else if(data->set.timecondition && !conn->range) {
+ /* A time condition has been set AND no ranges have been
+ requested. This seems to be what chapter 13.3.4 of
+ RFC 2616 defines to be the correct action for a
+ HTTP/1.1 client */
+ if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
+ switch(data->set.timecondition) {
+ case TIMECOND_IFMODSINCE:
+ default:
+ if(k->timeofdoc < data->set.timevalue) {
+ infof(data,
+ "The requested document is not new enough\n");
+ return CURLE_OK;
+ }
+ break;
+ case TIMECOND_IFUNMODSINCE:
+ if(k->timeofdoc > data->set.timevalue) {
+ infof(data,
+ "The requested document is not old enough\n");
+ return CURLE_OK;
+ }
+ break;
+ } /* switch */
+ } /* two valid time strings */
+ } /* we have a time condition */
- if((-1 != conn->maxdownload) &&
- (bytecount + nread >= conn->maxdownload)) {
- nread = conn->maxdownload - bytecount;
- if((signed int)nread < 0 ) /* this should be unusual */
- nread = 0;
+ } /* this is HTTP */
+ } /* this is the first time we write a body part */
+ k->bodywrites++;
- keepon &= ~KEEP_READ; /* we're done reading */
- FD_ZERO(&rkeepfd);
- }
+ if(conn->bits.chunk) {
+ /*
+ * Bless me father for I have sinned. Here comes a chunked
+ * transfer flying and we need to decode this properly. While
+ * the name says read, this function both reads and writes away
+ * the data. The returned 'nread' holds the number of actual
+ * data it wrote to the client. */
+ CHUNKcode res =
+ Curl_httpchunk_read(conn, k->str, nread, &nread);
+
+ if(CHUNKE_OK < res) {
+            failf(data, "Received problem in the chunky parser");
+ return CURLE_READ_ERROR;
+ }
+ else if(CHUNKE_STOP == res) {
+ /* we're done reading chunks! */
+ k->keepon &= ~KEEP_READ; /* read no more */
+ FD_ZERO(&k->rkeepfd);
+
+ /* There are now possibly N number of bytes at the end of the
+ str buffer that weren't written to the client, but we don't
+ care about them right now. */
+ }
+ /* If it returned OK, we just keep going */
+ }
+
+ if((-1 != conn->maxdownload) &&
+ (k->bytecount + nread >= conn->maxdownload)) {
+ nread = conn->maxdownload - k->bytecount;
+ if((signed int)nread < 0 ) /* this should be unusual */
+ nread = 0;
+
+ k->keepon &= ~KEEP_READ; /* we're done reading */
+ FD_ZERO(&k->rkeepfd);
+ }
- bytecount += nread;
+ k->bytecount += nread;
- Curl_pgrsSetDownloadCounter(data, (double)bytecount);
+ Curl_pgrsSetDownloadCounter(data, (double)k->bytecount);
- if(!conn->bits.chunk && nread) {
- /* If this is chunky transfer, it was already written */
- urg = Curl_client_write(data, CLIENTWRITE_BODY, str, nread);
- if(urg)
- return urg;
- }
+ if(!conn->bits.chunk && nread) {
+ /* If this is chunky transfer, it was already written */
+ result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
+ nread);
+ if(result)
+ return result;
+ }
- } /* if (! header and data to read ) */
- } /* if( read from socket ) */
+ } /* if (! header and data to read ) */
+ } /* if( read from socket ) */
- if((keepon & KEEP_WRITE) && FD_ISSET(conn->writesockfd, &writefd)) {
- /* write */
+ if((k->keepon & KEEP_WRITE) &&
+ FD_ISSET(conn->writesockfd, &k->writefd)) {
+ /* write */
- int i, si;
- size_t bytes_written;
+ int i, si;
+ size_t bytes_written;
- nread = data->set.fread(buf, 1, conn->upload_bufsize, data->set.in);
+ if ((k->bytecount == 0) && (k->writebytecount == 0))
+ Curl_pgrsTime(data, TIMER_STARTTRANSFER);
- /* the signed int typecase of nread of for systems that has
- unsigned size_t */
- if ((signed int)nread<=0) {
- /* done */
- keepon &= ~KEEP_WRITE; /* we're done writing */
- FD_ZERO(&wkeepfd);
- break;
- }
- writebytecount += nread;
- Curl_pgrsSetUploadCounter(data, (double)writebytecount);
-
- /* convert LF to CRLF if so asked */
- if (data->set.crlf) {
- for(i = 0, si = 0; i < (int)nread; i++, si++) {
- if (buf[i] == 0x0a) {
- data->state.scratch[si++] = 0x0d;
- data->state.scratch[si] = 0x0a;
- }
- else {
- data->state.scratch[si] = buf[i];
- }
- }
- nread = si;
- buf = data->state.scratch; /* point to the new buffer */
- }
+ didwhat |= KEEP_WRITE;
- /* write to socket */
- urg = Curl_write(conn, conn->writesockfd, buf, nread,
- &bytes_written);
+ nread = data->set.fread(k->buf, 1, conn->upload_bufsize,
+ data->set.in);
- if(nread != (int)bytes_written) {
- failf(data, "Failed uploading data");
- return CURLE_WRITE_ERROR;
+        /* the signed int typecast of nread is for systems that have an
+           unsigned size_t */
+ if ((signed int)nread<=0) {
+ /* done */
+ k->keepon &= ~KEEP_WRITE; /* we're done writing */
+ FD_ZERO(&k->wkeepfd);
+ break;
+ }
+ k->writebytecount += nread;
+ Curl_pgrsSetUploadCounter(data, (double)k->writebytecount);
+
+ /* convert LF to CRLF if so asked */
+ if (data->set.crlf) {
+ for(i = 0, si = 0; i < (int)nread; i++, si++) {
+ if (k->buf[i] == 0x0a) {
+ data->state.scratch[si++] = 0x0d;
+ data->state.scratch[si] = 0x0a;
+ }
+ else {
+ data->state.scratch[si] = k->buf[i];
}
- if(data->set.crlf)
- buf = data->state.buffer; /* put it back on the buffer */
-
}
+ nread = si;
+ k->buf = data->state.scratch; /* point to the new buffer */
+ }
- break;
+ /* write to socket */
+ result = Curl_write(conn, conn->writesockfd, k->buf, nread,
+ &bytes_written);
+ if(result)
+ return result;
+ else if(nread != (int)bytes_written) {
+ failf(data, "Failed uploading data");
+ return CURLE_WRITE_ERROR;
}
+ else if(data->set.crlf)
+ k->buf = data->state.buffer; /* put it back on the buffer */
- /* Update read/write counters */
- if(conn->bytecountp)
- *conn->bytecountp = bytecount; /* read count */
- if(conn->writebytecountp)
- *conn->writebytecountp = writebytecount; /* write count */
+ }
- now = Curl_tvnow();
- if(Curl_pgrsUpdate(conn))
- urg = CURLE_ABORTED_BY_CALLBACK;
- else
- urg = Curl_speedcheck (data, now);
- if (urg)
- return urg;
-
- if(data->progress.ulspeed > conn->upload_bufsize) {
- /* If we're transfering more data per second than fits in our buffer,
- we increase the buffer size to adjust to the current
- speed. However, we must not set it larger than BUFSIZE. We don't
- adjust it downwards again since we don't see any point in that!
- */
- conn->upload_bufsize=(long)min(data->progress.ulspeed, BUFSIZE);
- }
+ } while(0); /* just to break out from! */
- if (data->set.timeout &&
- ((Curl_tvdiff(now, start)/1000) >= data->set.timeout)) {
- failf (data, "Operation timed out with %d out of %d bytes received",
- bytecount, conn->size);
- return CURLE_OPERATION_TIMEOUTED;
- }
+ if(didwhat) {
+ /* Update read/write counters */
+ if(conn->bytecountp)
+ *conn->bytecountp = k->bytecount; /* read count */
+ if(conn->writebytecountp)
+ *conn->writebytecountp = k->writebytecount; /* write count */
+ }
+ else {
+ /* no read no write, this is a timeout? */
+ if (k->write_after_100_header) {
+      /* This should allow some time for the header to arrive, but only a
+         very short time as otherwise it'll be too much wasted time too
+         often. */
+ k->write_after_100_header = FALSE;
+ FD_SET (conn->writesockfd, &k->writefd); /* write socket */
+ k->keepon |= KEEP_WRITE;
+ k->wkeepfd = k->writefd;
+ }
+ }
+ k->now = Curl_tvnow();
+ if(Curl_pgrsUpdate(conn))
+ result = CURLE_ABORTED_BY_CALLBACK;
+ else
+ result = Curl_speedcheck (data, k->now);
+ if (result)
+ return result;
+
+ if(data->progress.ulspeed > conn->upload_bufsize) {
+  /* If we're transferring more data per second than fits in our buffer,
+ we increase the buffer size to adjust to the current
+ speed. However, we must not set it larger than BUFSIZE. We don't
+ adjust it downwards again since we don't see any point in that!
+ */
+ conn->upload_bufsize=(long)min(data->progress.ulspeed, BUFSIZE);
+ }
+
+ if (data->set.timeout &&
+ ((Curl_tvdiff(k->now, k->start)/1000) >= data->set.timeout)) {
+ failf (data, "Operation timed out with %d out of %d bytes received",
+ k->bytecount, conn->size);
+ return CURLE_OPERATION_TIMEOUTED;
+ }
+
+ if(!k->keepon) {
+ /*
+ * The transfer has been performed. Just make some general checks before
+ * returning.
+ */
+
+ if(!(data->set.no_body) && k->contentlength &&
+ (k->bytecount != k->contentlength) &&
+ !conn->newurl) {
+ failf(data, "transfer closed with %d bytes remaining to read",
+ k->contentlength-k->bytecount);
+ return CURLE_PARTIAL_FILE;
}
+ else if(conn->bits.chunk && conn->proto.http->chunk.datasize) {
+ failf(data, "transfer closed with at least %d bytes remaining",
+ conn->proto.http->chunk.datasize);
+ return CURLE_PARTIAL_FILE;
+ }
+ if(Curl_pgrsUpdate(conn))
+ return CURLE_ABORTED_BY_CALLBACK;
}
- /*
- * The tranfer has been performed. Just make some general checks before
- * returning.
- */
+ /* Now update the "done" boolean we return */
+ *done = !k->keepon;
+
+ return CURLE_OK;
+}
+
+CURLcode Curl_readwrite_init(struct connectdata *conn)
+{
+ struct SessionHandle *data = conn->data;
+ struct Curl_transfer_keeper *k = &conn->keep;
+
+ memset(k, 0, sizeof(struct Curl_transfer_keeper));
+
+ k->start = Curl_tvnow(); /* start time */
+ k->now = k->start; /* current time is now */
+ k->header = TRUE; /* assume header */
+ k->httpversion = -1; /* unknown at this point */
+ k->conn = (struct connectdata *)conn; /* store the connection */
+
+ data = conn->data; /* there's the root struct */
+ k->buf = data->state.buffer;
+ k->maxfd = (conn->sockfd>conn->writesockfd?
+ conn->sockfd:conn->writesockfd)+1;
+ k->hbufp = data->state.headerbuff;
- if(!(data->set.no_body) && contentlength &&
- (bytecount != contentlength)) {
- failf(data, "transfer closed with %d bytes remaining to read",
- contentlength-bytecount);
- return CURLE_PARTIAL_FILE;
+ Curl_pgrsTime(data, TIMER_PRETRANSFER);
+ Curl_speedinit(data);
+
+ if (!conn->getheader) {
+ k->header = FALSE;
+ if(conn->size > 0)
+ Curl_pgrsSetDownloadSize(data, conn->size);
}
- else if(conn->bits.chunk && conn->proto.http->chunk.datasize) {
- failf(data, "transfer closed with at least %d bytes remaining",
- conn->proto.http->chunk.datasize);
- return CURLE_PARTIAL_FILE;
+ /* we want header and/or body, if neither then don't do this! */
+ if(conn->getheader || !data->set.no_body) {
+
+ FD_ZERO (&k->readfd); /* clear it */
+ if(conn->sockfd != -1) {
+ FD_SET (conn->sockfd, &k->readfd); /* read socket */
+ k->keepon |= KEEP_READ;
+ }
+
+ FD_ZERO (&k->writefd); /* clear it */
+ if(conn->writesockfd != -1) {
+ if (data->set.expect100header)
+ /* wait with write until we either got 100-continue or a timeout */
+ k->write_after_100_header = TRUE;
+ else {
+ FD_SET (conn->writesockfd, &k->writefd); /* write socket */
+ k->keepon |= KEEP_WRITE;
+ }
+ }
+
+ /* get these in backup variables to be able to restore them on each lap in
+ the select() loop */
+ k->rkeepfd = k->readfd;
+ k->wkeepfd = k->writefd;
+
}
- if(Curl_pgrsUpdate(conn))
- return CURLE_ABORTED_BY_CALLBACK;
return CURLE_OK;
}
-CURLcode Curl_perform(struct SessionHandle *data)
+void Curl_single_fdset(struct connectdata *conn,
+ fd_set *read_fd_set,
+ fd_set *write_fd_set,
+ fd_set *exc_fd_set,
+ int *max_fd)
{
- CURLcode res;
- struct connectdata *conn=NULL;
- bool port=TRUE; /* allow data->set.use_port to set port to use */
- char *newurl = NULL; /* possibly a new URL to follow to! */
-#ifdef HAVE_SIGNAL
- /* storage for the previous bag^H^H^HSIGPIPE signal handler :-) */
- void (*prev_signal)(int sig);
+ *max_fd = -1; /* init */
+ if(conn->keep.keepon & KEEP_READ) {
+ FD_SET(conn->sockfd, read_fd_set);
+ *max_fd = conn->sockfd;
+ }
+ if(conn->keep.keepon & KEEP_WRITE) {
+ FD_SET(conn->writesockfd, write_fd_set);
+ if(conn->writesockfd > *max_fd)
+ *max_fd = conn->writesockfd;
+ }
+ /* we don't use exceptions, only touch that one to prevent compiler
+ warnings! */
+ *exc_fd_set = *exc_fd_set;
+}
+
+
+/*
+ * Transfer()
+ *
+ * This function is what performs the actual transfer. It is capable of
+ * doing both ways simultaneously.
+ * The transfer must already have been setup by a call to Curl_Transfer().
+ *
+ * Note that headers are created in a preallocated buffer of a default size.
+ * That buffer can be enlarged on demand, but it is never shrunk again.
+ *
+ * Parts of this function was once written by the friendly Mark Butler
+ * <butlerm@xmission.com>.
+ */
+
+static CURLcode
+Transfer(struct connectdata *conn)
+{
+ struct SessionHandle *data = conn->data;
+ CURLcode result;
+ struct Curl_transfer_keeper *k = &conn->keep;
+ bool done=FALSE;
+
+ Curl_readwrite_init(conn);
+
+ if((conn->sockfd == -1) && (conn->writesockfd == -1))
+ /* nothing to read, nothing to write, we're already OK! */
+ return CURLE_OK;
+
+ /* we want header and/or body, if neither then don't do this! */
+ if(!conn->getheader && data->set.no_body)
+ return CURLE_OK;
+
+ while (!done) {
+ struct timeval interval;
+ k->readfd = k->rkeepfd; /* set these every lap in the loop */
+ k->writefd = k->wkeepfd;
+ interval.tv_sec = 1;
+ interval.tv_usec = 0;
+
+ switch (select (k->maxfd, &k->readfd, &k->writefd, NULL,
+ &interval)) {
+ case -1: /* select() error, stop reading */
+#ifdef EINTR
+      /* The EINTR is not serious, and it seems you might get this more
+         often when using the lib in a multi-threaded environment! */
+ if(errno == EINTR)
+ ;
+ else
#endif
+ done = TRUE; /* no more read or write */
+ continue;
+ case 0: /* timeout */
+ result = Curl_readwrite(conn, &done);
+ break;
+ default: /* readable descriptors */
+ result = Curl_readwrite(conn, &done);
+ break;
+ }
+ if(result)
+ return result;
+
+ /* "done" signals to us if the transfer(s) are ready */
+ }
+ return CURLE_OK;
+}
+
+CURLcode Curl_pretransfer(struct SessionHandle *data)
+{
if(!data->change.url)
/* we can't do anything wihout URL */
return CURLE_URL_MALFORMAT;
@@ -922,16 +973,45 @@ CURLcode Curl_perform(struct SessionHandle *data)
data->state.this_is_a_follow = FALSE; /* reset this */
data->state.errorbuf = FALSE; /* no error has occurred */
+ /* Allow data->set.use_port to set which port to use. This needs to be
+ * disabled for example when we follow Location: headers to URLs using
+ * different ports! */
+ data->state.allow_port = TRUE;
+
#if defined(HAVE_SIGNAL) && defined(SIGPIPE)
/*************************************************************
* Tell signal handler to ignore SIGPIPE
*************************************************************/
- prev_signal = signal(SIGPIPE, SIG_IGN);
+ data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
#endif
Curl_initinfo(data); /* reset session-specific information "variables" */
Curl_pgrsStartNow(data);
+ return CURLE_OK;
+}
+
+CURLcode Curl_posttransfer(struct SessionHandle *data)
+{
+#if defined(HAVE_SIGNAL) && defined(SIGPIPE)
+ /* restore the signal handler for SIGPIPE before we get back */
+ signal(SIGPIPE, data->state.prev_signal);
+#endif
+
+ return CURLE_OK;
+}
+
+CURLcode Curl_perform(struct SessionHandle *data)
+{
+ CURLcode res;
+ CURLcode res2;
+ struct connectdata *conn=NULL;
+ char *newurl = NULL; /* possibly a new URL to follow to! */
+
+ res = Curl_pretransfer(data);
+ if(res)
+ return res;
+
/*
* It is important that there is NO 'return' from this function any any
* other place than falling down the bottom! This is because we have cleanup
@@ -941,30 +1021,9 @@ CURLcode Curl_perform(struct SessionHandle *data)
do {
Curl_pgrsTime(data, TIMER_STARTSINGLE);
- res = Curl_connect(data, &conn, port);
+ res = Curl_connect(data, &conn);
if(res == CURLE_OK) {
- res = Curl_do(conn);
-
- if((CURLE_WRITE_ERROR == res) && conn->bits.reuse) {
- /* This was a re-use of a connection and we got a write error in the
- * DO-phase. Then we DISCONNECT this connection and have another
- * attempt to CONNECT and then DO again! The retry cannot possibly
- * find another connection to re-use, since we only keep one possible
- * connection for each.
- */
-
- infof(data, "The re-used connection seems dead, get a new one\n");
-
- conn->bits.close = TRUE; /* enforce close of this connetion */
- res = Curl_done(conn); /* we are so done with this */
- if(CURLE_OK == res) {
- /* Now, redo the connect */
- res = Curl_connect(data, &conn, port);
- if(CURLE_OK == res)
- /* ... finally back to actually retry the DO phase */
- res = Curl_do(conn);
- }
- }
+ res = Curl_do(&conn);
if(res == CURLE_OK) {
CURLcode res2; /* just a local extra result container */
@@ -1010,9 +1069,6 @@ CURLcode Curl_perform(struct SessionHandle *data)
char prot[16]; /* URL protocol string storage */
char letter; /* used for a silly sscanf */
- port=TRUE; /* by default we use the user set port number even after
- a Location: */
-
if (data->set.maxredirs && (data->set.followlocation >= data->set.maxredirs)) {
failf(data,"Maximum (%d) redirects followed", data->set.maxredirs);
res=CURLE_TOO_MANY_REDIRECTS;
@@ -1101,10 +1157,9 @@ CURLcode Curl_perform(struct SessionHandle *data)
free(url_clone);
newurl = newest;
}
- else {
- /* This is an absolute URL, don't use the custom port number */
- port = FALSE;
- }
+ else
+ /* This is an absolute URL, don't allow the custom port number */
+ data->state.allow_port = FALSE;
if(data->change.url_alloc)
free(data->change.url);
@@ -1182,10 +1237,11 @@ CURLcode Curl_perform(struct SessionHandle *data)
if(newurl)
free(newurl);
-#if defined(HAVE_SIGNAL) && defined(SIGPIPE)
- /* restore the signal handler for SIGPIPE before we get back */
- signal(SIGPIPE, prev_signal);
-#endif
+  /* run post-transfer unconditionally, but don't clobber the return code if
+     we already have an error code recorded */
+ res2 = Curl_posttransfer(data);
+ if(!res && res2)
+ res = res2;
return res;
}
diff --git a/lib/transfer.h b/lib/transfer.h
index c35f7c6fb..6f616c8ad 100644
--- a/lib/transfer.h
+++ b/lib/transfer.h
@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 2000, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 2001, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* In order to be useful for every potential user, curl and libcurl are
* dual-licensed under the MPL and the MIT/X-derivate licenses.
@@ -24,6 +24,17 @@
*****************************************************************************/
CURLcode Curl_perform(struct SessionHandle *data);
+CURLcode Curl_pretransfer(struct SessionHandle *data);
+CURLcode Curl_posttransfer(struct SessionHandle *data);
+
+CURLcode Curl_readwrite(struct connectdata *conn, bool *done);
+void Curl_single_fdset(struct connectdata *conn,
+ fd_set *read_fd_set,
+ fd_set *write_fd_set,
+ fd_set *exc_fd_set,
+ int *max_fd);
+CURLcode Curl_readwrite_init(struct connectdata *conn);
+
/* This sets up a forthcoming transfer */
CURLcode
Curl_Transfer (struct connectdata *data,
diff --git a/lib/url.c b/lib/url.c
index 5a477a26d..3973902ef 100644
--- a/lib/url.c
+++ b/lib/url.c
@@ -1214,8 +1214,7 @@ static CURLcode ConnectPlease(struct connectdata *conn)
}
static CURLcode CreateConnection(struct SessionHandle *data,
- struct connectdata **in_connect,
- bool allow_port) /* allow set.use_port? */
+ struct connectdata **in_connect)
{
char *tmp;
char *buf;
@@ -1614,7 +1613,8 @@ static CURLcode CreateConnection(struct SessionHandle *data,
*************************************************************/
if (strequal(conn->protostr, "HTTP")) {
- conn->port = (data->set.use_port && allow_port)?data->set.use_port:PORT_HTTP;
+ conn->port = (data->set.use_port && data->state.allow_port)?
+ data->set.use_port:PORT_HTTP;
conn->remote_port = PORT_HTTP;
conn->protocol |= PROT_HTTP;
conn->curl_do = Curl_http;
@@ -1624,7 +1624,8 @@ static CURLcode CreateConnection(struct SessionHandle *data,
else if (strequal(conn->protostr, "HTTPS")) {
#ifdef USE_SSLEAY
- conn->port = (data->set.use_port && allow_port)?data->set.use_port:PORT_HTTPS;
+ conn->port = (data->set.use_port && data->state.allow_port)?
+ data->set.use_port:PORT_HTTPS;
conn->remote_port = PORT_HTTPS;
conn->protocol |= PROT_HTTP|PROT_HTTPS|PROT_SSL;
@@ -1639,7 +1640,8 @@ static CURLcode CreateConnection(struct SessionHandle *data,
#endif /* !USE_SSLEAY */
}
else if (strequal(conn->protostr, "GOPHER")) {
- conn->port = (data->set.use_port && allow_port)?data->set.use_port:PORT_GOPHER;
+ conn->port = (data->set.use_port && data->state.allow_port)?
+ data->set.use_port:PORT_GOPHER;
conn->remote_port = PORT_GOPHER;
/* Skip /<item-type>/ in path if present */
if (isdigit((int)conn->path[1])) {
@@ -1665,7 +1667,8 @@ static CURLcode CreateConnection(struct SessionHandle *data,
#endif /* !USE_SSLEAY */
}
- conn->port = (data->set.use_port && allow_port)?data->set.use_port:PORT_FTP;
+ conn->port = (data->set.use_port && data->state.allow_port)?
+ data->set.use_port:PORT_FTP;
conn->remote_port = PORT_FTP;
conn->protocol |= PROT_FTP;
@@ -1720,21 +1723,24 @@ static CURLcode CreateConnection(struct SessionHandle *data,
/* telnet testing factory */
conn->protocol |= PROT_TELNET;
- conn->port = (data->set.use_port && allow_port)?data->set.use_port: PORT_TELNET;
+ conn->port = (data->set.use_port && data->state.allow_port)?
+ data->set.use_port: PORT_TELNET;
conn->remote_port = PORT_TELNET;
conn->curl_do = Curl_telnet;
conn->curl_done = Curl_telnet_done;
}
else if (strequal(conn->protostr, "DICT")) {
conn->protocol |= PROT_DICT;
- conn->port = (data->set.use_port && allow_port)?data->set.use_port:PORT_DICT;
+ conn->port = (data->set.use_port && data->state.allow_port)?
+ data->set.use_port:PORT_DICT;
conn->remote_port = PORT_DICT;
conn->curl_do = Curl_dict;
conn->curl_done = NULL; /* no DICT-specific done */
}
else if (strequal(conn->protostr, "LDAP")) {
conn->protocol |= PROT_LDAP;
- conn->port = (data->set.use_port && allow_port)?data->set.use_port:PORT_LDAP;
+ conn->port = (data->set.use_port && data->state.allow_port)?
+ data->set.use_port:PORT_LDAP;
conn->remote_port = PORT_LDAP;
conn->curl_do = Curl_ldap;
conn->curl_done = NULL; /* no LDAP-specific done */
@@ -2228,14 +2234,13 @@ static CURLcode CreateConnection(struct SessionHandle *data,
}
CURLcode Curl_connect(struct SessionHandle *data,
- struct connectdata **in_connect,
- bool allow_port)
+ struct connectdata **in_connect)
{
CURLcode code;
struct connectdata *conn;
/* call the stuff that needs to be called */
- code = CreateConnection(data, in_connect, allow_port);
+ code = CreateConnection(data, in_connect);
if(CURLE_OK != code) {
/* We're not allowed to return failure with memory left allocated
@@ -2291,14 +2296,38 @@ CURLcode Curl_done(struct connectdata *conn)
return result;
}
-CURLcode Curl_do(struct connectdata *conn)
+CURLcode Curl_do(struct connectdata **connp)
{
CURLcode result=CURLE_OK;
+ struct connectdata *conn = *connp;
+ struct SessionHandle *data=conn->data;
- if(conn->curl_do)
+ if(conn->curl_do) {
/* generic protocol-specific function pointer set in curl_connect() */
result = conn->curl_do(conn);
+ /* This was formerly done in transfer.c, but we better do it here */
+
+ if((CURLE_WRITE_ERROR == result) && conn->bits.reuse) {
+ /* This was a re-use of a connection and we got a write error in the
+ * DO-phase. Then we DISCONNECT this connection and have another attempt
+ * to CONNECT and then DO again! The retry cannot possibly find another
+ * connection to re-use, since we only keep one possible connection for
+ * each. */
+
+ infof(data, "Re-used connection seems dead, get a new one\n");
+
+      conn->bits.close = TRUE; /* enforce close of this connection */
+ result = Curl_done(conn); /* we are so done with this */
+ if(CURLE_OK == result) {
+ /* Now, redo the connect and get a new connection */
+ result = Curl_connect(data, connp);
+ if(CURLE_OK == result)
+ /* ... finally back to actually retry the DO phase */
+ result = conn->curl_do(*connp);
+ }
+ }
+ }
return result;
}
diff --git a/lib/url.h b/lib/url.h
index c842a97f2..de3c02ef2 100644
--- a/lib/url.h
+++ b/lib/url.h
@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 2000, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 2001, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* In order to be useful for every potential user, curl and libcurl are
* dual-licensed under the MPL and the MIT/X-derivate licenses.
@@ -29,11 +29,9 @@
CURLcode Curl_open(struct SessionHandle **curl);
CURLcode Curl_setopt(struct SessionHandle *data, CURLoption option, ...);
-CURLcode Curl_close(struct SessionHandle *data); /* the opposite of curl_open() */
-CURLcode Curl_connect(struct SessionHandle *,
- struct connectdata **,
- bool allow_port);
-CURLcode Curl_do(struct connectdata *);
+CURLcode Curl_close(struct SessionHandle *data); /* opposite of curl_open() */
+CURLcode Curl_connect(struct SessionHandle *, struct connectdata **);
+CURLcode Curl_do(struct connectdata **);
CURLcode Curl_done(struct connectdata *);
CURLcode Curl_disconnect(struct connectdata *);
diff --git a/lib/urldata.h b/lib/urldata.h
index 00ef3b355..b7f3a2f21 100644
--- a/lib/urldata.h
+++ b/lib/urldata.h
@@ -212,6 +212,57 @@ struct ConnectBits {
};
/*
+ * This struct is all the previously local variables from Curl_perform() moved
+ * to struct to allow the function to return and get re-invoked better without
+ * losing state.
+ */
+
+struct Curl_transfer_keeper {
+ int bytecount; /* total number of bytes read */
+ int writebytecount; /* number of bytes written */
+ long contentlength; /* size of incoming data */
+ struct timeval start; /* transfer started at this time */
+ struct timeval now; /* current time */
+ bool header; /* incoming data has HTTP header */
+ int headerline; /* counts header lines to better track the
+ first one */
+ char *hbufp; /* points at *end* of header line */
+ int hbuflen;
+ char *str; /* within buf */
+ char *str_start; /* within buf */
+ char *end_ptr; /* within buf */
+ char *p; /* within headerbuff */
+ bool content_range; /* set TRUE if Content-Range: was found */
+ int offset; /* possible resume offset read from the
+ Content-Range: header */
+ int httpcode; /* error code from the 'HTTP/1.? XXX' line */
+ int httpversion; /* the HTTP version*10 */
+ bool write_after_100_header; /* should we enable the write after
+ we received a 100-continue/timeout
+ or directly */
+
+ /* for the low speed checks: */
+ time_t timeofdoc;
+ long bodywrites;
+ int writetype;
+
+ /* the highest fd we use + 1 */
+ struct SessionHandle *data;
+ struct connectdata *conn;
+ char *buf;
+ int maxfd;
+
+ /* the file descriptors to play with */
+ fd_set readfd;
+ fd_set writefd;
+ fd_set rkeepfd;
+ fd_set wkeepfd;
+ int keepon;
+
+};
+
+
+/*
* The connectdata struct contains all fields and variables that should be
* unique for an entire connection.
*/
@@ -355,6 +406,8 @@ struct connectdata {
void *generic;
} proto;
+ /* This struct is inited when needed */
+ struct Curl_transfer_keeper keep;
};
/*
@@ -460,6 +513,13 @@ struct UrlState {
bool errorbuf; /* Set to TRUE if the error buffer is already filled in.
This must be set to FALSE every time _easy_perform() is
called. */
+
+#ifdef HAVE_SIGNAL
+ /* storage for the previous bag^H^H^HSIGPIPE signal handler :-) */
+ void (*prev_signal)(int sig);
+#endif
+ bool allow_port; /* Is set.use_port allowed to take effect or not. This
+ is always set TRUE when curl_easy_perform() is called. */
};
@@ -569,7 +629,6 @@ struct UserDefined {
bool hide_progress;
bool http_fail_on_error;
bool http_follow_location;
-
bool include_header;
#define http_include_header include_header /* former name */