author    Daniel Stenberg <daniel@haxx.se>  2001-04-18 07:25:11 +0000
committer Daniel Stenberg <daniel@haxx.se>  2001-04-18 07:25:11 +0000
commit    3fd65fb7d83a8e3e6acd1a40c48b46088ebd536f
tree      13701e723f62ff38452b3c6096eaa98444d9d190
parent    ebcafe73b313e70b19e4f7b806e020e59f84c5b1
Remade resume stuff to keep data in the connectdata struct instead of the
main handle struct to work with persistent connections
-rw-r--r--  lib/file.c      4
-rw-r--r--  lib/ftp.c      49
-rw-r--r--  lib/http.c     28
-rw-r--r--  lib/progress.c 14
-rw-r--r--  lib/progress.h  4
-rw-r--r--  lib/transfer.c 10
-rw-r--r--  lib/url.c      47
-rw-r--r--  lib/urldata.h  14

8 files changed, 97 insertions, 73 deletions
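The pattern behind this commit: the user-configured values (set_range,
set_resume_from) stay in the handle, while a working copy (range, resume_from,
rangestringalloc) moves into the per-connection struct and is cloned at
connect time, then cleaned up in done/disconnect, so a persistent connection
can serve repeated transfers without stale state. The sketch below illustrates
that ownership split with simplified stand-in structs and function names
(handle/connection, conn_setup, conn_done); these are illustrative only and
not the real libcurl types or API.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct handle {              /* what the user configures (cf. UrlData) */
  char *set_range;           /* e.g. "500-999", owned by the caller */
  long  set_resume_from;     /* offset, cf. CURLOPT_RESUME_FROM */
};

struct connection {          /* per-connection state (cf. connectdata) */
  char *range;               /* working copy used for this transfer */
  int   range_alloced;       /* was 'range' malloc()'ed by us? */
  long  resume_from;
};

/* connect-time setup: inherit the transfer settings from the handle */
static void conn_setup(struct connection *conn, const struct handle *h)
{
  conn->range = h->set_range;
  conn->range_alloced = 0;
  conn->resume_from = h->set_resume_from;

  if (conn->resume_from && !conn->range) {
    /* build an "N-" range string, as the real code does for resume */
    char buf[40];
    snprintf(buf, sizeof(buf), "%ld-", conn->resume_from);
    conn->range = strdup(buf);
    conn->range_alloced = 1;
  }
}

/* done/disconnect-time cleanup: free only what this connection allocated */
static void conn_done(struct connection *conn)
{
  if (conn->range_alloced) {
    free(conn->range);
    conn->range = NULL;
    conn->range_alloced = 0;
  }
}

int main(void)
{
  struct handle h = { NULL, 1024 };   /* resume from byte 1024 */
  struct connection c;

  conn_setup(&c, &h);
  printf("transfer uses range \"%s\", resuming at %ld\n",
         c.range, c.resume_from);
  conn_done(&c);

  /* the same handle can drive another transfer on a re-used connection
     without carrying over per-transfer state */
  conn_setup(&c, &h);
  conn_done(&c);
  return 0;
}
```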
diff --git a/lib/file.c b/lib/file.c
index 516659662..452980f54 100644
--- a/lib/file.c
+++ b/lib/file.c
@@ -183,11 +183,11 @@ CURLcode Curl_file(struct connectdata *conn)
return res;
now = Curl_tvnow();
- if(Curl_pgrsUpdate(data))
+ if(Curl_pgrsUpdate(conn))
res = CURLE_ABORTED_BY_CALLBACK;
}
now = Curl_tvnow();
- if(Curl_pgrsUpdate(data))
+ if(Curl_pgrsUpdate(conn))
res = CURLE_ABORTED_BY_CALLBACK;
close(fd);
diff --git a/lib/ftp.c b/lib/ftp.c
index 8fda9f477..43c53a859 100644
--- a/lib/ftp.c
+++ b/lib/ftp.c
@@ -1272,7 +1272,7 @@ again:;
CURLE_FTP_COULDNT_SET_BINARY;
}
- if(data->resume_from) {
+ if(conn->resume_from) {
/* we're about to continue the uploading of a file */
/* 1. get already existing file's size. We use the SIZE
command for this which may not exist in the server!
@@ -1286,7 +1286,7 @@ again:;
/* 4. lower the infilesize counter */
/* => transfer as usual */
- if(data->resume_from < 0 ) {
+ if(conn->resume_from < 0 ) {
/* we could've got a specified offset from the command line,
but now we know we didn't */
@@ -1302,10 +1302,10 @@ again:;
}
/* get the size from the ascii string: */
- data->resume_from = atoi(buf+4);
+ conn->resume_from = atoi(buf+4);
}
- if(data->resume_from) {
+ if(conn->resume_from) {
/* do we still game? */
int passed=0;
/* enable append instead */
@@ -1315,7 +1315,7 @@ again:;
input. If we knew it was a proper file we could've just
fseek()ed but we only have a stream here */
do {
- int readthisamountnow = (data->resume_from - passed);
+ int readthisamountnow = (conn->resume_from - passed);
int actuallyread;
if(readthisamountnow > BUFSIZE)
@@ -1331,11 +1331,11 @@ again:;
return CURLE_FTP_COULDNT_USE_REST;
}
}
- while(passed != data->resume_from);
+ while(passed != conn->resume_from);
/* now, decrease the size of the read */
if(data->infilesize>0) {
- data->infilesize -= data->resume_from;
+ data->infilesize -= conn->resume_from;
if(data->infilesize <= 0) {
failf(data, "File already completely uploaded\n");
@@ -1387,13 +1387,13 @@ again:;
bool dirlist=FALSE;
long downloadsize=-1;
- if(data->bits.set_range && data->range) {
+ if(conn->bits.use_range && conn->range) {
long from, to;
int totalsize=-1;
char *ptr;
char *ptr2;
- from=strtol(data->range, &ptr, 0);
+ from=strtol(conn->range, &ptr, 0);
while(ptr && *ptr && (isspace((int)*ptr) || (*ptr=='-')))
ptr++;
to=strtol(ptr, &ptr2, 0);
@@ -1403,22 +1403,23 @@ again:;
}
if((-1 == to) && (from>=0)) {
/* X - */
- data->resume_from = from;
+ conn->resume_from = from;
infof(data, "FTP RANGE %d to end of file\n", from);
}
else if(from < 0) {
/* -Y */
totalsize = -from;
conn->maxdownload = -from;
- data->resume_from = from;
+ conn->resume_from = from;
infof(data, "FTP RANGE the last %d bytes\n", totalsize);
}
else {
/* X-Y */
totalsize = to-from;
conn->maxdownload = totalsize+1; /* include the last mentioned byte */
- data->resume_from = from;
- infof(data, "FTP RANGE from %d getting %d bytes\n", from, conn->maxdownload);
+ conn->resume_from = from;
+ infof(data, "FTP RANGE from %d getting %d bytes\n", from,
+ conn->maxdownload);
}
infof(data, "range-download from %d to %d, totally %d bytes\n",
from, to, totalsize);
@@ -1466,7 +1467,7 @@ again:;
CURLE_FTP_COULDNT_SET_BINARY;
}
- if(data->resume_from) {
+ if(conn->resume_from) {
/* Daniel: (August 4, 1999)
*
@@ -1491,26 +1492,26 @@ again:;
int foundsize=atoi(buf+4);
/* We got a file size report, so we check that there actually is a
part of the file left to get, or else we go home. */
- if(data->resume_from< 0) {
+ if(conn->resume_from< 0) {
/* We're supposed to download the last abs(from) bytes */
- if(foundsize < -data->resume_from) {
+ if(foundsize < -conn->resume_from) {
failf(data, "Offset (%d) was beyond file size (%d)",
- data->resume_from, foundsize);
+ conn->resume_from, foundsize);
return CURLE_FTP_BAD_DOWNLOAD_RESUME;
}
/* convert to size to download */
- downloadsize = -data->resume_from;
+ downloadsize = -conn->resume_from;
/* download from where? */
- data->resume_from = foundsize - downloadsize;
+ conn->resume_from = foundsize - downloadsize;
}
else {
- if(foundsize < data->resume_from) {
+ if(foundsize < conn->resume_from) {
failf(data, "Offset (%d) was beyond file size (%d)",
- data->resume_from, foundsize);
+ conn->resume_from, foundsize);
return CURLE_FTP_BAD_DOWNLOAD_RESUME;
}
/* Now store the number of bytes we are expected to download */
- downloadsize = foundsize-data->resume_from;
+ downloadsize = foundsize-conn->resume_from;
}
}
@@ -1521,9 +1522,9 @@ again:;
/* Set resume file transfer offset */
infof(data, "Instructs server to resume from offset %d\n",
- data->resume_from);
+ conn->resume_from);
- ftpsendf(conn->firstsocket, conn, "REST %d", data->resume_from);
+ ftpsendf(conn->firstsocket, conn, "REST %d", conn->resume_from);
nread = Curl_GetFTPResponse(conn->firstsocket, buf, conn, &ftpcode);
if(nread < 0)
diff --git a/lib/http.c b/lib/http.c
index 58d03ac65..1f17b9c00 100644
--- a/lib/http.c
+++ b/lib/http.c
@@ -497,7 +497,7 @@ CURLcode Curl_http(struct connectdata *conn)
if((data->bits.http_post ||
data->bits.http_formpost ||
data->bits.http_put) &&
- data->resume_from) {
+ conn->resume_from) {
/**********************************************************************
* Resuming upload in HTTP means that we PUT or POST and that we have
* got a resume_from value set. The resume value has already created
@@ -506,15 +506,15 @@ CURLcode Curl_http(struct connectdata *conn)
* file size before we continue this venture in the dark lands of HTTP.
*********************************************************************/
- if(data->resume_from < 0 ) {
+ if(conn->resume_from < 0 ) {
/*
* This is meant to get the size of the present remote-file by itself.
* We don't support this now. Bail out!
*/
- data->resume_from = 0;
+ conn->resume_from = 0;
}
- if(data->resume_from) {
+ if(conn->resume_from) {
/* do we still game? */
int passed=0;
@@ -522,7 +522,7 @@ CURLcode Curl_http(struct connectdata *conn)
input. If we knew it was a proper file we could've just
fseek()ed but we only have a stream here */
do {
- int readthisamountnow = (data->resume_from - passed);
+ int readthisamountnow = (conn->resume_from - passed);
int actuallyread;
if(readthisamountnow > BUFSIZE)
@@ -537,11 +537,11 @@ CURLcode Curl_http(struct connectdata *conn)
passed);
return CURLE_READ_ERROR;
}
- } while(passed != data->resume_from); /* loop until done */
+ } while(passed != conn->resume_from); /* loop until done */
/* now, decrease the size of the read */
if(data->infilesize>0) {
- data->infilesize -= data->resume_from;
+ data->infilesize -= conn->resume_from;
if(data->infilesize <= 0) {
failf(data, "File already completely uploaded\n");
@@ -551,7 +551,7 @@ CURLcode Curl_http(struct connectdata *conn)
/* we've passed, proceed as normal */
}
}
- if(data->bits.set_range) {
+ if(conn->bits.use_range) {
/*
* A range is selected. We use different headers whether we're downloading
* or uploading and we always let customized headers override our internal
@@ -559,23 +559,23 @@ CURLcode Curl_http(struct connectdata *conn)
*/
if((data->httpreq == HTTPREQ_GET) &&
!checkheaders(data, "Range:")) {
- conn->allocptr.rangeline = aprintf("Range: bytes=%s\r\n", data->range);
+ conn->allocptr.rangeline = aprintf("Range: bytes=%s\r\n", conn->range);
}
else if((data->httpreq != HTTPREQ_GET) &&
!checkheaders(data, "Content-Range:")) {
- if(data->resume_from) {
+ if(conn->resume_from) {
/* This is because "resume" was selected */
- long total_expected_size= data->resume_from + data->infilesize;
+ long total_expected_size= conn->resume_from + data->infilesize;
conn->allocptr.rangeline = aprintf("Content-Range: bytes %s%ld/%ld\r\n",
- data->range, total_expected_size-1,
+ conn->range, total_expected_size-1,
total_expected_size);
}
else {
/* Range was selected and then we just pass the incoming range and
append total size */
conn->allocptr.rangeline = aprintf("Content-Range: bytes %s/%d\r\n",
- data->range, data->infilesize);
+ conn->range, data->infilesize);
}
}
}
@@ -610,7 +610,7 @@ CURLcode Curl_http(struct connectdata *conn)
conn->allocptr.proxyuserpwd)?conn->allocptr.proxyuserpwd:"",
(conn->bits.user_passwd && conn->allocptr.userpwd)?
conn->allocptr.userpwd:"",
- (data->bits.set_range && conn->allocptr.rangeline)?
+ (conn->bits.use_range && conn->allocptr.rangeline)?
conn->allocptr.rangeline:"",
(data->useragent && *data->useragent && conn->allocptr.uagent)?
conn->allocptr.uagent:"",
diff --git a/lib/progress.c b/lib/progress.c
index 6451da6f4..9fbd45c34 100644
--- a/lib/progress.c
+++ b/lib/progress.c
@@ -91,11 +91,12 @@ static char *max5data(double bytes, char *max5)
*/
-void Curl_pgrsDone(struct UrlData *data)
+void Curl_pgrsDone(struct connectdata *conn)
{
+ struct UrlData *data = conn->data;
if(!(data->progress.flags & PGRS_HIDE)) {
data->progress.lastshow=0;
- Curl_pgrsUpdate(data); /* the final (forced) update */
+ Curl_pgrsUpdate(conn); /* the final (forced) update */
fprintf(data->err, "\n");
}
}
@@ -172,7 +173,7 @@ void Curl_pgrsSetUploadSize(struct UrlData *data, double size)
*/
-int Curl_pgrsUpdate(struct UrlData *data)
+int Curl_pgrsUpdate(struct connectdata *conn)
{
struct timeval now;
int result;
@@ -185,6 +186,8 @@ int Curl_pgrsUpdate(struct UrlData *data)
double total_transfer;
double total_expected_transfer;
+ struct UrlData *data = conn->data;
+
int nowindex = data->progress.speeder_c% CURR_TIME;
int checkindex;
int count;
@@ -198,15 +201,16 @@ int Curl_pgrsUpdate(struct UrlData *data)
double total_estimate;
+
if(data->progress.flags & PGRS_HIDE)
; /* We do enter this function even if we don't wanna see anything, since
this is were lots of the calculations are being made that will be used
even when not displayed! */
else if(!(data->progress.flags & PGRS_HEADERS_OUT)) {
if (!data->progress.callback) {
- if(data->resume_from)
+ if(conn->resume_from)
fprintf(data->err, "** Resuming transfer from byte position %d\n",
- data->resume_from);
+ conn->resume_from);
fprintf(data->err,
" %% Total %% Received %% Xferd Average Speed Time Curr.\n"
" Dload Upload Total Current Left Speed\n");
diff --git a/lib/progress.h b/lib/progress.h
index 04b3d6c24..55ceb7422 100644
--- a/lib/progress.h
+++ b/lib/progress.h
@@ -36,13 +36,13 @@ typedef enum {
TIMER_LAST /* must be last */
} timerid;
-void Curl_pgrsDone(struct UrlData *data);
+void Curl_pgrsDone(struct connectdata *);
void Curl_pgrsStartNow(struct UrlData *data);
void Curl_pgrsSetDownloadSize(struct UrlData *data, double size);
void Curl_pgrsSetUploadSize(struct UrlData *data, double size);
void Curl_pgrsSetDownloadCounter(struct UrlData *data, double size);
void Curl_pgrsSetUploadCounter(struct UrlData *data, double size);
-int Curl_pgrsUpdate(struct UrlData *data);
+int Curl_pgrsUpdate(struct connectdata *);
void Curl_pgrsTime(struct UrlData *data, timerid timer);
diff --git a/lib/transfer.c b/lib/transfer.c
index cde665c85..52fde048f 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -465,7 +465,7 @@ Transfer(struct connectdata *c_conn)
/* This second format was added August 1st 2000 by Igor
Khristophorov since Sun's webserver JavaWebServer/1.1.1
obviously sends the header this way! :-( */
- if (data->resume_from == offset) {
+ if (conn->resume_from == offset) {
/* we asked for a resume and we got it */
content_range = TRUE;
}
@@ -547,7 +547,7 @@ Transfer(struct connectdata *c_conn)
infof (data, "Follow to new URL: %s\n", conn->newurl);
return CURLE_OK;
}
- else if (data->resume_from &&
+ else if (conn->resume_from &&
!content_range &&
(data->httpreq==HTTPREQ_GET)) {
/* we wanted to resume a download, although the server
@@ -557,7 +557,7 @@ Transfer(struct connectdata *c_conn)
"byte ranges. Cannot resume.");
return CURLE_HTTP_RANGE_ERROR;
}
- else if(data->timecondition && !data->range) {
+ else if(data->timecondition && !conn->range) {
/* A time condition has been set AND no ranges have been
requested. This seems to be what chapter 13.3.4 of
RFC 2616 defines to be the correct action for a
@@ -696,7 +696,7 @@ Transfer(struct connectdata *c_conn)
}
now = Curl_tvnow();
- if(Curl_pgrsUpdate(data))
+ if(Curl_pgrsUpdate(conn))
urg = CURLE_ABORTED_BY_CALLBACK;
else
urg = Curl_speedcheck (data, now);
@@ -730,7 +730,7 @@ Transfer(struct connectdata *c_conn)
conn->proto.http->chunk.datasize);
return CURLE_PARTIAL_FILE;
}
- if(Curl_pgrsUpdate(data))
+ if(Curl_pgrsUpdate(conn))
return CURLE_ABORTED_BY_CALLBACK;
if(conn->bytecountp)
diff --git a/lib/url.c b/lib/url.c
index 8880c1eb2..4fa180dea 100644
--- a/lib/url.c
+++ b/lib/url.c
@@ -153,13 +153,6 @@ CURLcode Curl_close(CURL *curl)
data->bits.httpproxy=FALSE;
}
-
- if(data->bits.rangestringalloc) {
- free(data->range);
- data->range=NULL;
- data->bits.rangestringalloc=0; /* free now */
- }
-
/* check for allocated [URL] memory to free: */
if(data->freethis)
free(data->freethis);
@@ -703,14 +696,14 @@ CURLcode Curl_setopt(CURL *curl, CURLoption option, ...)
/*
* What range of the file you want to transfer
*/
- data->range = va_arg(param, char *);
- data->bits.set_range = data->range?1:0;
+ data->set_range = va_arg(param, char *);
+ data->bits.set_range = data->set_range?1:0;
break;
case CURLOPT_RESUME_FROM:
/*
* Resume transfer at the give file position
*/
- data->resume_from = va_arg(param, long);
+ data->set_resume_from = va_arg(param, long);
break;
case CURLOPT_STDERR:
/*
@@ -801,6 +794,16 @@ CURLcode Curl_disconnect(struct connectdata *conn)
if(!conn)
return CURLE_OK; /* this is closed and fine already */
+ /*
+ * The range string is usually freed in curl_done(), but we might
+ * get here *instead* if we fail prematurely. Thus we need to be able
+ * to free this resource here as well.
+ */
+ if(conn->bits.rangestringalloc) {
+ free(conn->range);
+ conn->bits.rangestringalloc = FALSE;
+ }
+
if(-1 != conn->connectindex) {
/* unlink ourselves! */
infof(conn->data, "Closing live connection (#%d)\n", conn->connectindex);
@@ -1348,6 +1351,9 @@ static CURLcode Connect(struct UrlData *data,
conn->secondarysocket = -1; /* no file descriptor */
conn->connectindex = -1; /* no index */
conn->bits.httpproxy = data->bits.httpproxy; /* proxy-or-not status */
+ conn->bits.use_range = data->bits.set_range; /* range status */
+ conn->range = data->set_range; /* clone the range setting */
+ conn->resume_from = data->set_resume_from; /* inherite resume_from */
/* Default protocol-independent behavior doesn't support persistant
connections, so we set this to force-close. Protocols that support
@@ -1650,13 +1656,13 @@ static CURLcode Connect(struct UrlData *data,
* server, we just fail since we can't rewind the file writing from within
* this function.
***********************************************************/
- if(data->resume_from) {
- if(!data->bits.set_range) {
+ if(conn->resume_from) {
+ if(!conn->bits.use_range) {
/* if it already was in use, we just skip this */
- snprintf(resumerange, sizeof(resumerange), "%d-", data->resume_from);
- data->range=strdup(resumerange); /* tell ourselves to fetch this range */
- data->bits.rangestringalloc = TRUE; /* mark as allocated */
- data->bits.set_range = 1; /* switch on range usage */
+ snprintf(resumerange, sizeof(resumerange), "%d-", conn->resume_from);
+ conn->range=strdup(resumerange); /* tell ourselves to fetch this range */
+ conn->bits.rangestringalloc = TRUE; /* mark as allocated */
+ conn->bits.use_range = 1; /* switch on range usage */
}
}
@@ -2210,13 +2216,20 @@ CURLcode Curl_done(struct connectdata *conn)
struct UrlData *data=conn->data;
CURLcode result;
+ /* cleanups done even if the connection is re-used */
+
+ if(conn->bits.rangestringalloc) {
+ free(conn->range);
+ conn->bits.rangestringalloc = FALSE;
+ }
+
/* this calls the protocol-specific function pointer previously set */
if(conn->curl_done)
result = conn->curl_done(conn);
else
result = CURLE_OK;
- Curl_pgrsDone(data); /* done with the operation */
+ Curl_pgrsDone(conn); /* done with the operation */
/* if data->bits.reuse_forbid is TRUE, it means the libcurl client has
forced us to close this no matter what we think.
diff --git a/lib/urldata.h b/lib/urldata.h
index 5c019946f..8a0c1f19b 100644
--- a/lib/urldata.h
+++ b/lib/urldata.h
@@ -185,6 +185,9 @@ struct ConnectBits {
bool httpproxy; /* if set, this transfer is done through a http proxy */
bool user_passwd; /* do we use user+password for this connection? */
bool proxy_user_passwd; /* user+password for the proxy? */
+
+ bool use_range;
+ bool rangestringalloc; /* the range string is malloc()'ed */
};
/*
@@ -228,6 +231,10 @@ struct connectdata {
char *ppath;
long bytecount;
+ char *range; /* range, if used. See README for detailed specification on
+ this syntax. */
+ int resume_from; /* continue [ftp] transfer from here */
+
char *proxyhost; /* name of the http proxy host */
struct timeval now; /* "current" time */
@@ -401,7 +408,6 @@ struct Configbits {
bool this_is_a_follow; /* this is a followed Location: request */
bool krb4; /* kerberos4 connection requested */
bool proxystringalloc; /* the http proxy string is malloc()'ed */
- bool rangestringalloc; /* the range string is malloc()'ed */
bool urlstringalloc; /* the URL string is malloc()'ed */
bool reuse_forbid; /* if this is forbidden to be reused, close
after use */
@@ -460,8 +466,8 @@ struct UrlData {
struct ssl_config_data ssl; /* this is for ssl-stuff */
char *userpwd; /* <user:password>, if used */
- char *range; /* range, if used. See README for detailed specification on
- this syntax. */
+ char *set_range; /* range, if used. See README for detailed specification on
+ this syntax. */
/* stuff related to HTTP */
@@ -510,7 +516,7 @@ struct UrlData {
long low_speed_limit; /* bytes/second */
long low_speed_time; /* number of seconds */
- int resume_from; /* continue [ftp] transfer from here */
+ int set_resume_from; /* continue [ftp] transfer from here */
char *cookie; /* HTTP cookie string to send */