From 96cf615e9dec951b2c4244780b5f3fe2fb303f5b Mon Sep 17 00:00:00 2001
From: Daniel Stenberg
Date: Wed, 27 Oct 2004 21:29:55 +0000
Subject: Added --retry and --retry-delay first attempt with four related test
 cases.

---
 CHANGES                |  10 ++++
 src/main.c             | 146 ++++++++++++++++++++++++++++++++++++++++++++-----
 tests/data/Makefile.am |   2 +-
 tests/data/test195     |  30 ++++++++++
 tests/data/test196     |  32 +++++++++++
 tests/data/test197     |  69 +++++++++++++++++++++++
 tests/data/test198     |  64 ++++++++++++++++++++++
 7 files changed, 338 insertions(+), 15 deletions(-)
 create mode 100644 tests/data/test195
 create mode 100644 tests/data/test196
 create mode 100644 tests/data/test197
 create mode 100644 tests/data/test198

diff --git a/CHANGES b/CHANGES
index c9b7f15f0..89122c79e 100644
--- a/CHANGES
+++ b/CHANGES
@@ -6,6 +6,16 @@
 
 Changelog
 
+Daniel (27 October 2004)
+- Added a --retry option to curl that takes a numerical option for the number
+  of times the operation should be retried. It is retried if a transient error
+  is detected or if a timeout occurred. By default, it will first wait one
+  second between the retries and then double the delay time between each retry
+  until the delay time is ten minutes which then will be the delay time
+  between all forthcoming retries. You can set a static delay time with
+  "--retry-delay [num]" where [num] is the number of seconds to wait between
+  each retry.
+
 Daniel (25 October 2004)
 - Tomas Pospisek filed bug report #1053287 that proved -C - and --fail on a
   file that was already completely downloaded caused an error, while it
diff --git a/src/main.c b/src/main.c
index 75a4355c1..3caa473a1 100644
--- a/src/main.c
+++ b/src/main.c
@@ -362,6 +362,8 @@ static void help(void)
     " -r/--range <range> Retrieve a byte range from a HTTP/1.1 or FTP server",
     "    --random-file <file> File for reading random data from (SSL)",
     " -R/--remote-time   Set the remote file's time on the local output",
+    "    --retry <num>   Retry request <num> times if transient problems occur",
+    "    --retry-delay <seconds> When retrying, wait this many seconds between each",
     " -s/--silent        Silent mode. Don't output anything",
     " -S/--show-error    Show error. With -s, make curl show errors when they occur",
     "    --socks <host[:port]> Use SOCKS5 proxy on given host + port",
@@ -445,13 +447,10 @@ struct Configurable {
   char *proxy;
   bool proxytunnel;
   long conf;
-
   struct getout *url_list; /* point to the first node */
   struct getout *url_last; /* point to the last/current node */
-
   struct getout *url_get; /* point to the node to fill in URL */
   struct getout *url_out; /* point to the node to fill in outfile */
-
   char *cipher_list;
   char *cert;
   char *cert_type;
@@ -468,7 +467,6 @@ struct Configurable {
   FILE *trace_stream;
   bool trace_fopened;
   bool trace_ascii;
-
   long httpversion;
   bool progressmode;
   bool nobuffer;
@@ -480,29 +478,21 @@ struct Configurable {
   bool proxyntlm;
   bool proxydigest;
   bool proxybasic;
-
   char *writeout; /* %-styled format string to output */
   bool writeenv; /* write results to environment, if available */
-
   FILE *errors; /* if stderr redirect is requested */
   bool errors_fopened;
-
   struct curl_slist *quote;
   struct curl_slist *postquote;
   struct curl_slist *prequote;
-
   long ssl_version;
   long ip_version;
   curl_TimeCond timecond;
   time_t condtime;
-
   struct curl_slist *headers;
-
   struct curl_httppost *httppost;
   struct curl_httppost *last_post;
-
   struct curl_slist *telnet_options;
-
   HttpReq httpreq;
 
   /* for bandwidth limiting features: */
@@ -512,10 +502,11 @@ struct Configurable {
   size_t lastsendsize;
   struct timeval lastrecvtime;
   size_t lastrecvsize;
-
   bool ftp_ssl;
   char *socks5proxy;
   bool tcp_nodelay;
+  long req_retry;   /* number of retries */
+  long retry_delay; /* delay between retries (in seconds) */
 };
 
 /* global variable to hold info about libcurl */
@@ -1197,6 +1188,8 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
     {"$d", "tcp-nodelay",FALSE},
     {"$e", "proxy-digest", FALSE},
     {"$f", "proxy-basic", FALSE},
+    {"$g", "retry", TRUE},
+    {"$h", "retry-delay", TRUE},
     {"0", "http1.0", FALSE},
     {"1", "tlsv1", FALSE},
     {"2", "sslv2", FALSE},
@@ -1532,6 +1525,14 @@ static ParameterError getparameter(char *flag, /* f or -long-flag */
       case 'f': /* --proxy-basic */
         config->proxybasic ^= TRUE;
         break;
+      case 'g': /* --retry */
+        if(str2num(&config->req_retry, nextarg))
+          return PARAM_BAD_NUMERIC;
+        break;
+      case 'h': /* --retry-delay */
+        if(str2num(&config->retry_delay, nextarg))
+          return PARAM_BAD_NUMERIC;
+        break;
       }
       break;
     case '#': /* added 19990617 larsa */
@@ -2299,6 +2300,8 @@ struct OutStruct {
   char *filename;
   FILE *stream;
   struct Configurable *config;
+  curl_off_t bytes; /* amount written so far */
+  curl_off_t init;  /* original size (non-zero when appending) */
 };
 
 static int my_fwrite(void *buffer, size_t sz, size_t nmemb, void *stream)
@@ -2367,6 +2370,11 @@ static int my_fwrite(void *buffer, size_t sz, size_t nmemb, void *stream)
 
   rc = fwrite(buffer, sz, nmemb, out->stream);
 
+  if((int)(sz * nmemb) == rc) {
+    /* we added this amount of data to the output */
+    out->bytes += (sz * nmemb);
+  }
+
   if(config->nobuffer)
     /* disable output buffering */
     fflush(out->stream);
@@ -2731,6 +2739,9 @@ static void FindWin32CACert(struct Configurable *config,
 
 #endif
 
+#define RETRY_SLEEP_DEFAULT 1000 /* ms */
+#define RETRY_SLEEP_MAX 600000   /* ms == 10 minutes */
+
 static int
 operate(struct Configurable *config, int argc, char *argv[])
 {
@@ -2771,6 +2782,11 @@ operate(struct Configurable *config, int argc, char *argv[])
   int res = 0;
   int i;
   int up; /* upload file counter within a single upload glob */
+  int retry_sleep_default = config->retry_delay?
+    config->retry_delay*1000:RETRY_SLEEP_DEFAULT; /* ms */
+  int retry_numretries;
+  int retry_sleep = retry_sleep_default;
+  long response;
   char *env;
 
 #ifdef CURLDEBUG
@@ -2974,6 +2990,7 @@ operate(struct Configurable *config, int argc, char *argv[])
       /* default output stream is stdout */
       outs.stream = stdout;
       outs.config = config;
+      outs.bytes = 0; /* nothing written yet */
 
       /* save outfile pattern before expansion */
       outfiles = urlnode->outfile?strdup(urlnode->outfile):NULL;
@@ -3105,6 +3122,7 @@ operate(struct Configurable *config, int argc, char *argv[])
         outs.filename = outfile;
 
         if(config->resume_from) {
+          outs.init = config->resume_from;
          /* open file for output: */
          outs.stream=(FILE *) fopen(outfile, config->resume_from?"ab":"wb");
          if (!outs.stream) {
@@ -3471,7 +3489,107 @@ operate(struct Configurable *config, int argc, char *argv[])
          curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
         }
 
-        res = curl_easy_perform(curl);
+        retry_numretries = config->req_retry;
+
+        do {
+          res = curl_easy_perform(curl);
+
+          if(retry_numretries) {
+            enum {
+              RETRY_NO,
+              RETRY_TIMEOUT,
+              RETRY_HTTP,
+              RETRY_FTP,
+              RETRY_LAST /* not used */
+            } retry = RETRY_NO;
+            if(CURLE_OPERATION_TIMEDOUT == res)
+              /* retry timeout always */
+              retry = RETRY_TIMEOUT;
+            else if(CURLE_OK == res) {
+              /* Check for HTTP transient errors */
+              char *url=NULL;
+              curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &url);
+              if(url &&
+                 curlx_strnequal(url, "http", 4)) {
+                /* This was HTTP(S) */
+                curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
+
+                switch(response) {
+                case 500: /* Internal Server Error */
+                case 502: /* Bad Gateway */
+                case 503: /* Service Unavailable */
+                case 504: /* Gateway Timeout */
+                  retry = RETRY_HTTP;
+                  /*
+                   * At this point, we have already written data to the output
+                   * file (or terminal). If we write to a file, we must rewind
+                   * or close/re-open the file so that the next attempt starts
+                   * over from the beginning.
+                   *
+                   * TODO: similar action for the upload case. We might need
+                   * to start over reading from a previous point if we have
+                   * uploaded something when this was returned.
+                   */
+                  break;
+                }
+              }
+            } /* if CURLE_OK */
+            else if(CURLE_FTP_USER_PASSWORD_INCORRECT == res) {
+              curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
+
+              if(response/100 == 5)
+                /*
+                 * This is typically when the FTP server only allows a certain
+                 * amount of users and we are not one of them. It mostly
+                 * returns 530 in this case, but all 5xx codes are transient.
+                 */
+                retry = RETRY_FTP;
+            }
+
+            if(retry) {
+              if(!(config->conf&CONF_MUTE)) {
+                static const char *m[]={NULL,
+                                        "timeout",
+                                        "HTTP error",
+                                        "FTP error"
+                };
+                fprintf(stderr, "Transient problem: %s\n"
+                        "Will retry in %d seconds. "
" + "%d retries left.\n", + m[retry], + retry_sleep/1000, + retry_numretries); + } + go_sleep(retry_sleep); + retry_numretries--; + if(!config->retry_delay) { + retry_sleep *= 2; + if(retry_sleep > RETRY_SLEEP_MAX) + retry_sleep = RETRY_SLEEP_MAX; + } + if(outs.bytes && outs.filename) { + /* We have written data to a output file, we truncate file + */ + if(!(config->conf&CONF_MUTE)) + fprintf(stderr, "Throwing away " CURL_FORMAT_OFF_T + " bytes\n", outs.bytes); + fflush(outs.stream); + /* truncate file at the position where we started appending */ + ftruncate( fileno(outs.stream), outs.init); + /* now seek to the end of the file, the position where we + just truncated the file */ + fseek(outs.stream, 0, SEEK_END); + outs.bytes = 0; /* clear for next round */ + } + continue; + } + } /* if retry_numretries */ + + /* In all ordinary cases, just break out of loop here */ + retry_sleep = retry_sleep_default; + break; + + } while(1); if((config->progressmode == CURL_PROGRESS_BAR) && progressbar.calls) { diff --git a/tests/data/Makefile.am b/tests/data/Makefile.am index 7298caf7a..cd1d82bd3 100644 --- a/tests/data/Makefile.am +++ b/tests/data/Makefile.am @@ -27,7 +27,7 @@ EXTRA_DIST = test1 test108 test117 test127 test20 test27 test34 test46 \ test172 test204 test205 test173 test174 test175 test176 test177 \ test513 test514 test178 test179 test180 test181 test182 test183 \ test184 test185 test186 test187 test188 test189 test191 test192 \ - test193 test194 + test193 test194 test195 test196 test197 test198 # The following tests have been removed from the dist since they no longer # work. We need to fix the test suite's FTPS server first, then bring them diff --git a/tests/data/test195 b/tests/data/test195 new file mode 100644 index 000000000..c736e53b3 --- /dev/null +++ b/tests/data/test195 @@ -0,0 +1,30 @@ +# Server-side + + + +# Client-side + + +ftp + + +FTP response 530 after PASS, temporarily not allowed access + + +ftp://%HOSTIP:%FTPPORT/195 + + +REPLY PASS 530 temporarily not available + + + +# Verify data after the test has been "shot" + + +10 + + +USER anonymous +PASS curl_by_daniel@haxx.se + + diff --git a/tests/data/test196 b/tests/data/test196 new file mode 100644 index 000000000..06eeeb5f9 --- /dev/null +++ b/tests/data/test196 @@ -0,0 +1,32 @@ +# Server-side + + + +# Client-side + + +ftp + + +FTP transient error, retry request once + + +ftp://%HOSTIP:%FTPPORT/196 --retry 1 + + +REPLY PASS 530 temporarily not available + + + +# Verify data after the test has been "shot" + + +10 + + +USER anonymous +PASS curl_by_daniel@haxx.se +USER anonymous +PASS curl_by_daniel@haxx.se + + diff --git a/tests/data/test197 b/tests/data/test197 new file mode 100644 index 000000000..2f73e8e92 --- /dev/null +++ b/tests/data/test197 @@ -0,0 +1,69 @@ +# +# Server-side + + +HTTP/1.1 503 OK swsbounce +Date: Thu, 09 Nov 2010 14:49:00 GMT +Content-Length: 21 + +server not available + + +HTTP/1.1 200 OK +Date: Thu, 09 Nov 2010 14:49:00 GMT +Content-Length: 3 +Connection: close + +ok + + + + +# +# Client-side + + +http + + +HTTP GET --retry on 503 error with output to stdout + + +http://%HOSTIP:%HTTPPORT/197 --retry 1000 + + + +# +# Verify data after the test has been "shot" + + +^User-Agent:.* + + +GET /197 HTTP/1.1 +Host: 127.0.0.1:%HTTPPORT +Pragma: no-cache +Accept: */* + +GET /197 HTTP/1.1 +Host: 127.0.0.1:%HTTPPORT +Pragma: no-cache +Accept: */* + + + + +HTTP/1.1 503 OK swsbounce +Date: Thu, 09 Nov 2010 14:49:00 GMT +Content-Length: 21 + +server not available +HTTP/1.1 200 OK +Date: Thu, 09 Nov 2010 14:49:00 
GMT +Content-Length: 3 +Connection: close + +ok + + + diff --git a/tests/data/test198 b/tests/data/test198 new file mode 100644 index 000000000..90d52ded3 --- /dev/null +++ b/tests/data/test198 @@ -0,0 +1,64 @@ +# +# Server-side + + +HTTP/1.1 503 OK swsbounce +Date: Thu, 09 Nov 2010 14:49:00 GMT +Content-Length: 21 + +server not available + + +HTTP/1.1 200 OK +Date: Thu, 09 Nov 2010 14:49:00 GMT +Content-Length: 3 +Connection: close + +ok + + + +HTTP/1.1 200 OK +Date: Thu, 09 Nov 2010 14:49:00 GMT +Content-Length: 3 +Connection: close + +ok + + + + +# +# Client-side + + +http + + +HTTP GET --retry on 503 error with output to file + + +http://%HOSTIP:%HTTPPORT/198 --retry 1000 + + + +# +# Verify data after the test has been "shot" + + +^User-Agent:.* + + +GET /198 HTTP/1.1 +Host: 127.0.0.1:%HTTPPORT +Pragma: no-cache +Accept: */* + +GET /198 HTTP/1.1 +Host: 127.0.0.1:%HTTPPORT +Pragma: no-cache +Accept: */* + + + + -- cgit v1.2.3
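A minimal standalone sketch of the delay schedule the retry loop above implements, not part of the patch itself: unless --retry-delay forces a fixed wait, curl starts at RETRY_SLEEP_DEFAULT (one second) and doubles the wait after every retry, capping it at RETRY_SLEEP_MAX (ten minutes). The helper name print_retry_schedule below is hypothetical and does not exist in curl; it only illustrates the arithmetic.

#include <stdio.h>

#define RETRY_SLEEP_DEFAULT 1000   /* ms, first wait when --retry-delay is not given */
#define RETRY_SLEEP_MAX   600000   /* ms, ten minute cap on the doubling */

/* Hypothetical helper: print the waits curl would sleep between attempts.
   'retries' corresponds to --retry, 'delay_secs' to --retry-delay
   (0 means the option was not used). */
static void print_retry_schedule(int retries, long delay_secs)
{
  int sleep_ms = delay_secs ? (int)(delay_secs * 1000) : RETRY_SLEEP_DEFAULT;
  int i;

  for(i = 0; i < retries; i++) {
    printf("retry %d: wait %d seconds\n", i + 1, sleep_ms / 1000);
    if(!delay_secs) {
      sleep_ms *= 2;                 /* exponential back-off... */
      if(sleep_ms > RETRY_SLEEP_MAX)
        sleep_ms = RETRY_SLEEP_MAX;  /* ...capped at ten minutes */
    }
  }
}

int main(void)
{
  print_retry_schedule(5, 0);  /* like "curl --retry 5 URL": 1, 2, 4, 8, 16 second waits */
  print_retry_schedule(3, 7);  /* like "curl --retry 3 --retry-delay 7 URL": 7, 7, 7 */
  return 0;
}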