diff options
author | Daniel Stenberg <daniel@haxx.se> | 2004-05-10 10:49:35 +0000 |
---|---|---|
committer | Daniel Stenberg <daniel@haxx.se> | 2004-05-10 10:49:35 +0000 |
commit | 71fdc063bd170d53f442e8b0e777756a38e2fa75 (patch) | |
tree | 8391cf0747b5fbc1b7b103c9e34a124aad1a4512 | |
parent | 887d78a9ad3d6326fec2894b98d042c9d2e7fcde (diff) |
better detection for when add_buffer() returns failure, and return when that
happens
-rw-r--r-- | lib/http.c | 24 |
1 files changed, 17 insertions, 7 deletions
diff --git a/lib/http.c b/lib/http.c index 5e663cb7d..34d79a7e2 100644 --- a/lib/http.c +++ b/lib/http.c @@ -1886,19 +1886,29 @@ CURLcode Curl_http(struct connectdata *conn) This limit is no magic limit but only set to prevent really huge POSTs to get the data duplicated with malloc() and family. */ - add_buffer(req_buffer, "\r\n", 2); /* end of headers! */ + result = add_buffer(req_buffer, "\r\n", 2); /* end of headers! */ + if(result) + return result; - if(!conn->bits.upload_chunky) + if(!conn->bits.upload_chunky) { /* We're not sending it 'chunked', append it to the request already now to reduce the number if send() calls */ - add_buffer(req_buffer, data->set.postfields, (size_t)postsize); + result = add_buffer(req_buffer, data->set.postfields, + (size_t)postsize); + } else { /* Append the POST data chunky-style */ - add_bufferf(req_buffer, "%x\r\n", (int)postsize); - add_buffer(req_buffer, data->set.postfields, (size_t)postsize); - add_buffer(req_buffer, "\r\n0\r\n\r\n", 7); /* end of a chunked - transfer stream */ + result = add_bufferf(req_buffer, "%x\r\n", (int)postsize); + if(CURLE_OK == result) + result = add_buffer(req_buffer, data->set.postfields, + (size_t)postsize); + if(CURLE_OK == result) + result = add_buffer(req_buffer, + "\r\n0\r\n\r\n", 7); /* end of a chunked + transfer stream */ } + if(result) + return result; } else { /* A huge POST coming up, do data separate from the request */ |