-rw-r--r--  CHANGES            |   7
-rw-r--r--  RELEASE-NOTES      |   1
-rw-r--r--  lib/ftp.c          |  63
-rw-r--r--  lib/urldata.h      |   7
-rw-r--r--  tests/data/test142 | 163
5 files changed, 205 insertions, 36 deletions
diff --git a/CHANGES b/CHANGES
--- a/CHANGES
+++ b/CHANGES
@@ -6,6 +6,13 @@ Changelog
+Daniel (15 April 2004)
+- Removed the fixed maximum amount of dir levels the FTP code supported.
+  Previously we had a fixed array for 100 levels, now we save space in each
+  handle by allocating only for a few level by default and then enlarging that
+  in case of need (with no maximum depth). Adjusted test case 142 to verify
+  that 150 dir levels work fine.
+
 Daniel (14 April 2004)
 - Asking for CURL_IPRESOLVE_V6 when ipv6 addresses can't be resolved will
   now cause the resolve function to return NULL immediately. This flaw was
diff --git a/RELEASE-NOTES b/RELEASE-NOTES
index 9caf178c5..e8914b2f7 100644
--- a/RELEASE-NOTES
+++ b/RELEASE-NOTES
@@ -7,6 +7,7 @@ Curl and libcurl 7.11.2. A bugfix release.
 This release includes the following changes:
+ o removed maximum dir depth limit in the FTP code
  o the ares build now requires c-ares 1.2.0 or later
  o --tcp-nodelay and CURLOPT_TCP_NODELAY were added
  o curl/curlver.h contains the libcurl version info now
diff --git a/lib/ftp.c b/lib/ftp.c
--- a/lib/ftp.c
+++ b/lib/ftp.c
@@ -124,9 +124,19 @@ static CURLcode ftp_cwd_and_mkd(struct connectdata *conn, char *path);
 static void freedirs(struct FTP *ftp)
 {
   int i;
-  for (i=0; ftp->dirs[i]; i++){
-    free(ftp->dirs[i]);
-    ftp->dirs[i]=NULL;
+  if(ftp->dirs) {
+    for (i=0; i < ftp->dirdepth; i++){
+      if(ftp->dirs[i]) {
+        free(ftp->dirs[i]);
+        ftp->dirs[i]=NULL;
+      }
+    }
+    free(ftp->dirs);
+    ftp->dirs = NULL;
+  }
+  if(ftp->file) {
+    free(ftp->file);
+    ftp->file = NULL;
   }
 }
@@ -732,14 +742,9 @@ CURLcode Curl_ftp_done(struct connectdata *conn)
   int ftpcode;
   CURLcode result=CURLE_OK;
-  /* free the dir tree parts */
+  /* free the dir tree and file parts */
   freedirs(ftp);
-  if(ftp->file) {
-    free(ftp->file);
-    ftp->file = NULL;
-  }
-
   if(data->set.upload) {
     if((-1 != data->set.infilesize) &&
        (data->set.infilesize != *ftp->bytecountp) &&
@@ -2317,7 +2322,6 @@ CURLcode Curl_ftp(struct connectdata *conn)
   char *slash_pos;  /* position of the first '/' char in curpos */
   char *cur_pos=conn->ppath; /* current position in ppath. point at the begin
                                 of next path component */
-  int path_part=0;/* current path component */
   /* the ftp struct is already inited in ftp_connect() */
   ftp = conn->proto.ftp;
@@ -2329,23 +2333,28 @@ CURLcode Curl_ftp(struct connectdata *conn)
   Curl_pgrsSetUploadSize(data, 0);
   Curl_pgrsSetDownloadSize(data, 0);
-  /* fixed : initialize ftp->dirs[xxx] to NULL !
-     is done in Curl_ftp_connect() */
-
+  ftp->dirdepth = 0;
+  ftp->diralloc = 5; /* default dir depth to allocate */
+  ftp->dirs = (char **)malloc(ftp->diralloc * sizeof(ftp->dirs[0]));
+  if(!ftp->dirs)
+    return CURLE_OUT_OF_MEMORY;
+  ftp->dirs[0] = NULL; /* to start with */
+
   /* parse the URL path into separate path components */
   while((slash_pos=strchr(cur_pos, '/'))) {
     /* 1 or 0 to indicate absolute directory */
-    bool absolute_dir = (cur_pos - conn->ppath > 0) && (path_part == 0);
+    bool absolute_dir = (cur_pos - conn->ppath > 0) && (ftp->dirdepth == 0);
     /* seek out the next path component */
     if (slash_pos-cur_pos) {
       /* we skip empty path components, like "x//y" since the FTP command CWD
          requires a parameter and a non-existant parameter a) doesn't work on
          many servers and b) has no effect on the others. */
-      ftp->dirs[path_part] = curl_unescape(cur_pos - absolute_dir,
-                                           slash_pos - cur_pos + absolute_dir);
+      ftp->dirs[ftp->dirdepth] = curl_unescape(cur_pos - absolute_dir,
+                                               slash_pos - cur_pos +
+                                               absolute_dir);
-      if (!ftp->dirs[path_part]) { /* run out of memory ... */
+      if (!ftp->dirs[ftp->dirdepth]) { /* run out of memory ... */
         failf(data, "no memory");
         freedirs(ftp);
         return CURLE_OUT_OF_MEMORY;
@@ -2358,12 +2367,16 @@ CURLcode Curl_ftp(struct connectdata *conn)
     if(!retcode) {
       cur_pos = slash_pos + 1; /* jump to the rest of the string */
-      if(++path_part >= (CURL_MAX_FTP_DIRDEPTH-1)) {
-        /* too deep, we need the last entry to be kept NULL at all
-           times to signal end of list */
-        failf(data, "too deep dir hierarchy");
-        freedirs(ftp);
-        return CURLE_URL_MALFORMAT;
+      if(++ftp->dirdepth >= ftp->diralloc) {
+        /* enlarge array */
+        char *bigger;
+        ftp->diralloc *= 2; /* double the size each time */
+        bigger = realloc(ftp->dirs, ftp->diralloc * sizeof(ftp->dirs[0]));
+        if(!bigger) {
+          freedirs(ftp);
+          return CURLE_OUT_OF_MEMORY;
+        }
+        ftp->dirs = (char **)bigger;
       }
     }
   }
@@ -2511,10 +2524,6 @@ CURLcode Curl_ftp_disconnect(struct connectdata *conn)
       free(ftp->cache);
       ftp->cache = NULL;
     }
-    if(ftp->file) {
-      free(ftp->file);
-      ftp->file = NULL; /* zero */
-    }
     freedirs(ftp);
   }
   return CURLE_OK;
diff --git a/lib/urldata.h b/lib/urldata.h
index f138fc48c..1da835126 100644
--- a/lib/urldata.h
+++ b/lib/urldata.h
@@ -107,9 +107,6 @@ of need. */
 #define HEADERSIZE 256
-/* Maximum number of dirs supported by libcurl in a FTP dir hierarchy */
-#define CURL_MAX_FTP_DIRDEPTH 100
-
 /* Just a convenience macro to get the larger value out of two given.
    We prefix with CURL to prevent name collisions. */
 #define CURLMAX(x,y) ((x)>(y)?(x):(y))
@@ -244,7 +241,9 @@ struct FTP {
   char *user;       /* user name string */
   char *passwd;     /* password string */
   char *urlpath;    /* the originally given path part of the URL */
-  char *dirs[CURL_MAX_FTP_DIRDEPTH]; /* path components */
+  char **dirs;      /* realloc()ed array for path components */
+  int dirdepth;     /* number of entries used in the 'dirs' array */
+  int diralloc;     /* number of entries allocated for the 'dirs' array */
   char *file;       /* decoded file */
   char *entrypath;  /* the PWD reply when we logged on */
diff --git a/tests/data/test142 b/tests/data/test142
index b9470ab3f..b69550bcc 100644
--- a/tests/data/test142
+++ b/tests/data/test142
@@ -1,6 +1,7 @@
 # Server-side
 <reply>
 <data>
+many parts there are
 </data>
 </reply>
@@ -10,21 +11,173 @@ ftp
 </server>
 <name>
-FTP URL with too deep (100+) dir hierarchy
+FTP URL with 150 dir levels
 </name>
 <command>
-ftp://%HOSTIP:%FTPPORT/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/b
+ftp://%HOSTIP:%FTPPORT/part1/part2/part3/part4/part5/part6/part7/part8/part9/part10/part11/part12/part13/part14/part15/part16/part17/part18/part19/part20/part21/part22/part23/part24/part25/part26/part27/part28/part29/part30/part31/part32/part33/part34/part35/part36/part37/part38/part39/part40/part41/part42/part43/part44/part45/part46/part47/part48/part49/part50/part51/part52/part53/part54/part55/part56/part57/part58/part59/part60/part61/part62/part63/part64/part65/part66/part67/part68/part69/part70/part71/part72/part73/part74/part75/part76/part77/part78/part79/part80/part81/part82/part83/part84/part85/part86/part87/part88/part89/part90/part91/part92/part93/part94/part95/part96/part97/part98/part99/part100/part101/part102/part103/part104/part105/part106/part107/part108/part109/part110/part111/part112/part113/part114/part115/part116/part117/part118/part119/part120/part121/part122/part123/part124/part125/part126/part127/part128/part129/part130/part131/part132/part133/part134/part135/part136/part137/part138/part139/part140/part141/part142/part143/part144/part145/part146/part147/part148/part149/part150/142
 </command>
 </test>
 # Verify data after the test has been "shot"
 <verify>
-<errorcode>
-3
-</errorcode>
 <protocol>
 USER anonymous
PASS curl_by_daniel@haxx.se
PWD
+CWD part1
+CWD part2
+CWD part3
+CWD part4
+CWD part5
+CWD part6
+CWD part7
+CWD part8
+CWD part9
+CWD part10
+CWD part11
+CWD part12
+CWD part13
+CWD part14
+CWD part15
+CWD part16
+CWD part17
+CWD part18
+CWD part19
+CWD part20
+CWD part21
+CWD part22
+CWD part23
+CWD part24
+CWD part25
+CWD part26
+CWD part27
+CWD part28
+CWD part29
+CWD part30
+CWD part31
+CWD part32
+CWD part33
+CWD part34
+CWD part35
+CWD part36
+CWD part37
+CWD part38
+CWD part39
+CWD part40
+CWD part41
+CWD part42
+CWD part43
+CWD part44
+CWD part45
+CWD part46
+CWD part47
+CWD part48
+CWD part49
+CWD part50
+CWD part51
+CWD part52
+CWD part53
+CWD part54
+CWD part55
+CWD part56
+CWD part57
+CWD part58
+CWD part59
+CWD part60
+CWD part61
+CWD part62
+CWD part63
+CWD part64
+CWD part65
+CWD part66
+CWD part67
+CWD part68
+CWD part69
+CWD part70
+CWD part71
+CWD part72
+CWD part73
+CWD part74
+CWD part75
+CWD part76
+CWD part77
+CWD part78
+CWD part79
+CWD part80
+CWD part81
+CWD part82
+CWD part83
+CWD part84
+CWD part85
+CWD part86
+CWD part87
+CWD part88
+CWD part89
+CWD part90
+CWD part91
+CWD part92
+CWD part93
+CWD part94
+CWD part95
+CWD part96
+CWD part97
+CWD part98
+CWD part99
+CWD part100
+CWD part101
+CWD part102
+CWD part103
+CWD part104
+CWD part105
+CWD part106
+CWD part107
+CWD part108
+CWD part109
+CWD part110
+CWD part111
+CWD part112
+CWD part113
+CWD part114
+CWD part115
+CWD part116
+CWD part117
+CWD part118
+CWD part119
+CWD part120
+CWD part121
+CWD part122
+CWD part123
+CWD part124
+CWD part125
+CWD part126
+CWD part127
+CWD part128
+CWD part129
+CWD part130
+CWD part131
+CWD part132
+CWD part133
+CWD part134
+CWD part135
+CWD part136
+CWD part137
+CWD part138
+CWD part139
+CWD part140
+CWD part141
+CWD part142
+CWD part143
+CWD part144
+CWD part145
+CWD part146
+CWD part147
+CWD part148
+CWD part149
+CWD part150
+EPSV
+TYPE I
+SIZE 142
+RETR 142
+QUIT
 </protocol>
 </verify>
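
For reference, below is a minimal standalone sketch of the pattern this patch adopts in lib/ftp.c: start with a small malloc()ed array of path components and double it with realloc() whenever one more dir level is needed, so there is no fixed maximum depth. This is illustrative code only, not libcurl source; the names (struct pathparts, path_split, path_free) are made up for the example.

/* Standalone sketch (not libcurl code) of the grow-on-demand dir array. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pathparts {
  char **dirs;   /* grows on demand, like ftp->dirs */
  int dirdepth;  /* entries used, like ftp->dirdepth */
  int diralloc;  /* entries allocated, like ftp->diralloc */
};

static void path_free(struct pathparts *p)
{
  int i;
  if(p->dirs) {
    for(i = 0; i < p->dirdepth; i++)
      free(p->dirs[i]);
    free(p->dirs);
    p->dirs = NULL;
  }
  p->dirdepth = p->diralloc = 0;
}

/* split 'path' on '/' into p->dirs; returns 0 on success, -1 on OOM */
static int path_split(struct pathparts *p, const char *path)
{
  const char *cur = path;
  const char *slash;

  p->dirdepth = 0;
  p->diralloc = 5; /* small default, as in the patch */
  p->dirs = malloc(p->diralloc * sizeof(p->dirs[0]));
  if(!p->dirs)
    return -1;

  while((slash = strchr(cur, '/'))) {
    if(slash != cur) { /* skip empty components like "x//y" */
      char *part = malloc(slash - cur + 1);
      if(!part) {
        path_free(p);
        return -1;
      }
      memcpy(part, cur, slash - cur);
      part[slash - cur] = '\0';
      p->dirs[p->dirdepth] = part;

      if(++p->dirdepth >= p->diralloc) {
        /* enlarge the array: double it each time, no upper bound */
        char **bigger;
        p->diralloc *= 2;
        bigger = realloc(p->dirs, p->diralloc * sizeof(p->dirs[0]));
        if(!bigger) {
          path_free(p);
          return -1;
        }
        p->dirs = bigger;
      }
    }
    cur = slash + 1; /* the part after the last slash is the file name */
  }
  return 0;
}

int main(void)
{
  struct pathparts p;
  if(!path_split(&p, "part1/part2/part3/part4/part5/part6/part7/file")) {
    int i;
    for(i = 0; i < p.dirdepth; i++)
      printf("CWD %s\n", p.dirs[i]);
    path_free(&p);
  }
  return 0;
}

With a depth of 7 the sketch crosses the initial allocation of 5 and doubles it once, which is the same path the new ftp.c code takes for test case 142's 150 levels (5, 10, 20, 40, 80, 160 entries).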