aboutsummaryrefslogtreecommitdiff
path: root/lib/transfer.c
diff options
context:
space:
mode:
authorDaniel Stenberg <daniel@haxx.se>2008-01-31 12:04:33 +0000
committerDaniel Stenberg <daniel@haxx.se>2008-01-31 12:04:33 +0000
commitb620e62f0f4e90f4d1338117c67580a6f5f37377 (patch)
tree75b747c8facdd1b70656dbab4550cad324e95b19 /lib/transfer.c
parentb3186dee177d277094111939ea9d192e278ea71c (diff)
- Dmitry Kurochkin moved several struct fields from the connectdata struct to
the SingleRequest one to make pipelining better. It is a bit tricky to keep these fields in the right place: data related to the actual request belongs in SingleRequest, while data tied to the actual connection belongs in connectdata.
Diffstat (limited to 'lib/transfer.c')
-rw-r--r--lib/transfer.c34
1 file changed, 17 insertions, 17 deletions
diff --git a/lib/transfer.c b/lib/transfer.c
index 69ce8af8b..49397e8c9 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -119,7 +119,7 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
size_t buffersize = (size_t)bytes;
int nread;
- if(conn->bits.upload_chunky) {
+ if(data->req.upload_chunky) {
/* if chunked Transfer-Encoding */
buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
data->req.upload_fromhere += 10; /* 32bit hex + CRLF */
@@ -143,7 +143,7 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
/* the read function returned a too large value */
return CURLE_READ_ERROR;
- if(!conn->bits.forbidchunk && conn->bits.upload_chunky) {
+ if(!data->req.forbidchunk && data->req.upload_chunky) {
/* if chunked Transfer-Encoding */
char hexbuffer[11];
int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
@@ -594,7 +594,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
else {
k->header = FALSE; /* no more header to parse! */
- if((k->size == -1) && !conn->bits.chunk && !conn->bits.close &&
+ if((k->size == -1) && !k->chunk && !conn->bits.close &&
(k->httpversion >= 11) ) {
/* On HTTP 1.1, when connection is not to get closed, but no
Content-Length nor Content-Encoding chunked have been
@@ -683,7 +683,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
* If we requested a "no body", this is a good time to get
* out and return home.
*/
- if(conn->bits.no_body)
+ if(data->set.opt_no_body)
stop_reading = TRUE;
else {
/* If we know the expected size of this document, we set the
@@ -699,7 +699,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
Content-Length: headers if we are now receiving data
using chunked Transfer-Encoding.
*/
- if(conn->bits.chunk)
+ if(k->chunk)
k->size=-1;
}
@@ -1002,7 +1002,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
* with the previously mentioned size. There can be any amount
* of chunks, and a chunk-data set to zero signals the
* end-of-chunks. */
- conn->bits.chunk = TRUE; /* chunks coming our way */
+ k->chunk = TRUE; /* chunks coming our way */
/* init our chunky engine */
Curl_httpchunk_init(conn);
@@ -1018,7 +1018,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
*
* It seems both Trailer: and Trailers: occur in the wild.
*/
- conn->bits.trailerhdrpresent = TRUE;
+ k->trailerhdrpresent = TRUE;
}
else if(checkprefix("Content-Encoding:", k->p) &&
@@ -1258,7 +1258,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
}
#ifndef CURL_DISABLE_HTTP
- if(conn->bits.chunk) {
+ if(k->chunk) {
/*
* Here comes a chunked transfer flying and we need to decode this
* properly. While the name says read, this function both reads
@@ -1326,7 +1326,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
Curl_pgrsSetDownloadCounter(data, k->bytecount);
- if(!conn->bits.chunk && (nread || k->badheader || is_empty_data)) {
+ if(!k->chunk && (nread || k->badheader || is_empty_data)) {
/* If this is chunky transfer, it was already written */
if(k->badheader && !k->ignorebody) {
@@ -1628,7 +1628,7 @@ CURLcode Curl_readwrite(struct connectdata *conn,
* returning.
*/
- if(!(conn->bits.no_body) && (k->size != -1) &&
+ if(!(data->set.opt_no_body) && (k->size != -1) &&
(k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
/* Most FTP servers don't adjust their file SIZE response for CRLFs,
@@ -1643,8 +1643,8 @@ CURLcode Curl_readwrite(struct connectdata *conn,
k->size - k->bytecount);
return CURLE_PARTIAL_FILE;
}
- else if(!(conn->bits.no_body) &&
- conn->bits.chunk &&
+ else if(!(data->set.opt_no_body) &&
+ k->chunk &&
(conn->chunk.state != CHUNK_STOP)) {
/*
* In chunked mode, return an error if the connection is closed prior to
@@ -1747,7 +1747,7 @@ Transfer(struct connectdata *conn)
return CURLE_OK;
/* we want header and/or body, if neither then don't do this! */
- if(!conn->bits.getheader && conn->bits.no_body)
+ if(!k->getheader && data->set.opt_no_body)
return CURLE_OK;
while(!done) {
@@ -2317,7 +2317,7 @@ bool Curl_retry_request(struct connectdata *conn,
if((data->req.bytecount +
data->req.headerbytecount == 0) &&
conn->bits.reuse &&
- !conn->bits.no_body) {
+ !data->set.opt_no_body) {
/* We got no data, we attempted to re-use a connection and yet we want a
"body". This might happen if the connection was left alive when we were
done using it before, but that was closed when we wanted to read from
@@ -2486,7 +2486,7 @@ Curl_setup_transfer(
CURL_SOCKET_BAD : conn->sock[sockindex];
conn->writesockfd = writesockindex == -1 ?
CURL_SOCKET_BAD:conn->sock[writesockindex];
- conn->bits.getheader = getheader;
+ k->getheader = getheader;
k->size = size;
k->bytecountp = bytecountp;
@@ -2496,13 +2496,13 @@ Curl_setup_transfer(
necessary input is not always known in do_complete() as this function may
be called after that */
- if(!conn->bits.getheader) {
+ if(!k->getheader) {
k->header = FALSE;
if(size > 0)
Curl_pgrsSetDownloadSize(data, size);
}
/* we want header and/or body, if neither then don't do this! */
- if(conn->bits.getheader || !conn->bits.no_body) {
+ if(k->getheader || !data->set.opt_no_body) {
if(conn->sockfd != CURL_SOCKET_BAD) {
k->keepon |= KEEP_READ;