From 8646cecb785e8ac426527daedc1eb35e27f2edca Mon Sep 17 00:00:00 2001 From: Daniel Stenberg Date: Sun, 27 Sep 2009 21:34:13 +0000 Subject: - I introduced a maximum limit for received HTTP headers. It is controlled by the define CURL_MAX_HTTP_HEADER which is even exposed in the public header file to allow for users to fairly easily rebuild libcurl with a modified limit. The rationale for a fixed limit is that libcurl is realloc()ing a buffer to be able to put a full header into it, so that it can call the header callback with the entire header, but that also risks getting it into trouble if a server by mistake or willingly sends a header that is more or less without an end. The limit is set to 100K. --- lib/transfer.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/transfer.c b/lib/transfer.c index 37e45002b..405add25d 100644 --- a/lib/transfer.c +++ b/lib/transfer.c @@ -752,12 +752,22 @@ static CURLcode header_append(struct SessionHandle *data, struct SingleRequest *k, size_t length) { - if(k->hbuflen + length >= data->state.headersize) { + if(k->hbuflen + length >= data->state.headersize) { /* We enlarge the header buffer as it is too small */ char *newbuff; size_t hbufp_index; - size_t newsize=CURLMAX((k->hbuflen+ length)*3/2, - data->state.headersize*2); + size_t newsize; + + if(k->hbuflen + length > CURL_MAX_HTTP_HEADER) { + /* The reason to have a max limit for this is to avoid the risk of a bad + server feeding libcurl with a never-ending header that will cause + reallocs infinitely */ + failf (data, "Avoided giant realloc for header (max is %d)!", + CURL_MAX_HTTP_HEADER); + return CURLE_OUT_OF_MEMORY; + } + + newsize=CURLMAX((k->hbuflen+ length)*3/2, data->state.headersize*2); hbufp_index = k->hbufp - data->state.headerbuff; newbuff = realloc(data->state.headerbuff, newsize); if(!newbuff) { -- cgit v1.2.3