author     Jay Satiro <raysatiro@yahoo.com>   2016-12-08 16:32:36 -0500
committer  Jay Satiro <raysatiro@yahoo.com>   2016-12-19 02:26:52 -0500
commit     d00f2a8f2e8529024709f70c8ab4c4138dc651a0 (patch)
tree       aa867b0c1a2d4f5f3f59541d4990adf8576a27c3 /lib
parent     afb57f7b0bfe179b1d31acf652cc136349de32ec (diff)
http_proxy: Fix proxy CONNECT hang on pending data
- Check for pending data before waiting on the socket.

Bug: https://github.com/curl/curl/issues/1156
Reported-by: Adam Langley
Diffstat (limited to 'lib')
-rw-r--r--  lib/connect.c     |  13
-rw-r--r--  lib/connect.h     |   2
-rw-r--r--  lib/ftp.c         |   2
-rw-r--r--  lib/http_proxy.c  | 461
-rw-r--r--  lib/sendf.c       |  11
-rw-r--r--  lib/sendf.h       |   2
6 files changed, 258 insertions, 233 deletions
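
For context, the core of the fix is to consult any data already cached above the socket (for example by the TLS layer, or by the recv-before-send workaround buffer) before blocking in select/poll; the new Curl_conn_data_pending() helper in the connect.c hunk below does exactly that. A minimal standalone sketch of the same pattern, written against plain POSIX with a hypothetical conn_t type rather than curl's internal API:

  #include <poll.h>
  #include <stdbool.h>
  #include <stddef.h>

  /* Hypothetical connection type: a socket plus an application-level
     buffer that may already hold bytes received earlier (e.g. by TLS). */
  typedef struct {
    int fd;              /* the network socket */
    size_t buffered;     /* bytes already cached above the socket */
  } conn_t;

  /* True if a read would yield data right now: either cached bytes exist,
     or a zero-timeout poll reports the socket as readable. */
  static bool conn_data_pending(const conn_t *c)
  {
    struct pollfd pfd;

    if(c->buffered > 0)
      return true;                  /* data cached above the socket */

    pfd.fd = c->fd;
    pfd.events = POLLIN;
    pfd.revents = 0;

    /* zero timeout: probe only, never wait */
    return poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLIN);
  }

Waiting only when this probe says nothing is pending is what removes the hang: a select/poll on the raw socket cannot see bytes that a higher layer has already pulled into its own buffers.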
diff --git a/lib/connect.c b/lib/connect.c
index 284726ae3..524d885eb 100644
--- a/lib/connect.c
+++ b/lib/connect.c
@@ -1404,3 +1404,16 @@ void Curl_conncontrol(struct connectdata *conn,
should assign this bit */
}
}
+
+/* Data received can be cached at various levels, so check them all here. */
+bool Curl_conn_data_pending(struct connectdata *conn, int sockindex)
+{
+ int readable;
+
+ if(Curl_ssl_data_pending(conn, sockindex) ||
+ Curl_recv_has_postponed_data(conn, sockindex))
+ return true;
+
+ readable = SOCKET_READABLE(conn->sock[sockindex], 0);
+ return (readable > 0 && (readable & CURL_CSELECT_IN));
+}
diff --git a/lib/connect.h b/lib/connect.h
index e01d1b35e..5653cb4e5 100644
--- a/lib/connect.h
+++ b/lib/connect.h
@@ -142,4 +142,6 @@ void Curl_conncontrol(struct connectdata *conn,
#define connkeep(x,y) Curl_conncontrol(x, CONNCTRL_KEEP)
#endif
+bool Curl_conn_data_pending(struct connectdata *conn, int sockindex);
+
#endif /* HEADER_CURL_CONNECT_H */
diff --git a/lib/ftp.c b/lib/ftp.c
index c7c275549..fd77a5acd 100644
--- a/lib/ftp.c
+++ b/lib/ftp.c
@@ -740,7 +740,7 @@ CURLcode Curl_GetFTPResponse(ssize_t *nreadp, /* return number of bytes read */
* wait for more data anyway.
*/
}
- else if(!Curl_ssl_data_pending(conn, FIRSTSOCKET)) {
+ else if(!Curl_conn_data_pending(conn, FIRSTSOCKET)) {
switch(SOCKET_READABLE(sockfd, interval_ms)) {
case -1: /* select() error, stop reading */
failf(data, "FTP response aborted due to select/poll error: %d",
diff --git a/lib/http_proxy.c b/lib/http_proxy.c
index f4e98e534..e0213f34d 100644
--- a/lib/http_proxy.c
+++ b/lib/http_proxy.c
@@ -285,7 +285,7 @@ CURLcode Curl_proxyCONNECT(struct connectdata *conn,
}
if(!blocking) {
- if(0 == SOCKET_READABLE(tunnelsocket, 0))
+ if(!Curl_conn_data_pending(conn, sockindex))
/* return so we'll be called again polling-style */
return CURLE_OK;
else {
@@ -304,13 +304,22 @@ CURLcode Curl_proxyCONNECT(struct connectdata *conn,
char *ptr;
char *line_start;
- ptr=data->state.buffer;
+ ptr = data->state.buffer;
line_start = ptr;
- nread=0;
- perline=0;
+ nread = 0;
+ perline = 0;
- while((nread<BUFSIZE) && (keepon && !error)) {
+ while(nread < BUFSIZE && keepon && !error) {
+ int writetype;
+
+ if(Curl_pgrsUpdate(conn))
+ return CURLE_ABORTED_BY_CALLBACK;
+
+ if(ptr >= &data->state.buffer[BUFSIZE]) {
+ failf(data, "CONNECT response too large!");
+ return CURLE_RECV_ERROR;
+ }
check = Curl_timeleft(data, NULL, TRUE);
if(check <= 0) {
@@ -319,244 +328,233 @@ CURLcode Curl_proxyCONNECT(struct connectdata *conn,
break;
}
- /* loop every second at least, less if the timeout is near */
- switch(SOCKET_READABLE(tunnelsocket, check<1000L?check:1000)) {
- case -1: /* select() error, stop reading */
- error = SELECT_ERROR;
- failf(data, "Proxy CONNECT aborted due to select/poll error");
- break;
- case 0: /* timeout */
+ /* Read one byte at a time to avoid a race condition. Wait at most one
+ second before looping to ensure continuous pgrsUpdates. */
+ result = Curl_read(conn, tunnelsocket, ptr, 1, &gotbytes);
+ if(result == CURLE_AGAIN) {
+ if(SOCKET_READABLE(tunnelsocket, check<1000L?check:1000) == -1) {
+ error = SELECT_ERROR;
+ failf(data, "Proxy CONNECT aborted due to select/poll error");
+ break;
+ }
+ continue;
+ }
+ else if(result) {
+ keepon = FALSE;
break;
- default:
- if(ptr >= &data->state.buffer[BUFSIZE]) {
- failf(data, "CONNECT response too large!");
- return CURLE_RECV_ERROR;
+ }
+ else if(gotbytes <= 0) {
+ if(data->set.proxyauth && data->state.authproxy.avail) {
+ /* proxy auth was requested and there was proxy auth available,
+ then deem this as "mere" proxy disconnect */
+ conn->bits.proxy_connect_closed = TRUE;
+ infof(data, "Proxy CONNECT connection closed\n");
}
- result = Curl_read(conn, tunnelsocket, ptr, 1, &gotbytes);
- if(result==CURLE_AGAIN)
- continue; /* go loop yourself */
- else if(result)
- keepon = FALSE;
- else if(gotbytes <= 0) {
- keepon = FALSE;
- if(data->set.proxyauth && data->state.authproxy.avail) {
- /* proxy auth was requested and there was proxy auth available,
- then deem this as "mere" proxy disconnect */
- conn->bits.proxy_connect_closed = TRUE;
- infof(data, "Proxy CONNECT connection closed\n");
- }
- else {
- error = SELECT_ERROR;
- failf(data, "Proxy CONNECT aborted");
+ else {
+ error = SELECT_ERROR;
+ failf(data, "Proxy CONNECT aborted");
+ }
+ keepon = FALSE;
+ break;
+ }
+
+ /* We got a byte of data */
+ nread++;
+
+ if(keepon > TRUE) {
+ /* This means we are currently ignoring a response-body */
+
+ nread = 0; /* make next read start over in the read buffer */
+ ptr = data->state.buffer;
+ if(cl) {
+ /* A Content-Length based body: simply count down the counter
+ and make sure to break out of the loop when we're done! */
+ cl--;
+ if(cl <= 0) {
+ keepon = FALSE;
+ break;
}
}
else {
- /* We got a byte of data */
- nread++;
-
- if(keepon > TRUE) {
- /* This means we are currently ignoring a response-body */
-
- nread = 0; /* make next read start over in the read buffer */
- ptr=data->state.buffer;
- if(cl) {
- /* A Content-Length based body: simply count down the counter
- and make sure to break out of the loop when we're done! */
- cl--;
- if(cl<=0) {
- keepon = FALSE;
- break;
- }
+ /* chunked-encoded body, so we need to do the chunked dance
+ properly to know when the end of the body is reached */
+ CHUNKcode r;
+ ssize_t tookcareof = 0;
+
+ /* now parse the chunked piece of data so that we can
+ properly tell when the stream ends */
+ r = Curl_httpchunk_read(conn, ptr, 1, &tookcareof);
+ if(r == CHUNKE_STOP) {
+ /* we're done reading chunks! */
+ infof(data, "chunk reading DONE\n");
+ keepon = FALSE;
+ /* we did the full CONNECT treatment, go COMPLETE */
+ conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;
+ }
+ }
+ continue;
+ }
+
+ perline++; /* amount of bytes in this line so far */
+
+ /* if this is not the end of a header line then continue */
+ if(*ptr != 0x0a) {
+ ptr++;
+ continue;
+ }
+
+ /* convert from the network encoding */
+ result = Curl_convert_from_network(data, line_start, perline);
+ /* Curl_convert_from_network calls failf if unsuccessful */
+ if(result)
+ return result;
+
+ /* output debug if that is requested */
+ if(data->set.verbose)
+ Curl_debug(data, CURLINFO_HEADER_IN,
+ line_start, (size_t)perline, conn);
+
+ /* send the header to the callback */
+ writetype = CLIENTWRITE_HEADER;
+ if(data->set.include_header)
+ writetype |= CLIENTWRITE_BODY;
+
+ result = Curl_client_write(conn, writetype, line_start, perline);
+
+ data->info.header_size += (long)perline;
+ data->req.headerbytecount += (long)perline;
+
+ if(result)
+ return result;
+
+ /* Newlines are CRLF, so the CR is ignored as the line isn't
+ really terminated until the LF comes. Treat a following CR
+ as end-of-headers as well.*/
+
+ if(('\r' == line_start[0]) ||
+ ('\n' == line_start[0])) {
+ /* end of response-headers from the proxy */
+ nread = 0; /* make next read start over in the read
+ buffer */
+ ptr = data->state.buffer;
+ if((407 == k->httpcode) && !data->state.authproblem) {
+ /* If we get a 407 response code with content length
+ when we have no auth problem, we must ignore the
+ whole response-body */
+ keepon = 2;
+
+ if(cl) {
+ infof(data, "Ignore %" CURL_FORMAT_CURL_OFF_T
+ " bytes of response-body\n", cl);
+ }
+ else if(chunked_encoding) {
+ CHUNKcode r;
+
+ infof(data, "Ignore chunked response-body\n");
+
+ /* We set ignorebody true here since the chunked
+ decoder function will acknowledge that. Pay
+ attention so that this is cleared again when this
+ function returns! */
+ k->ignorebody = TRUE;
+
+ if(line_start[1] == '\n') {
+ /* this can only be a LF if the letter at index 0
+ was a CR */
+ line_start++;
}
- else {
- /* chunked-encoded body, so we need to do the chunked dance
- properly to know when the end of the body is reached */
- CHUNKcode r;
- ssize_t tookcareof=0;
-
- /* now parse the chunked piece of data so that we can
- properly tell when the stream ends */
- r = Curl_httpchunk_read(conn, ptr, 1, &tookcareof);
- if(r == CHUNKE_STOP) {
- /* we're done reading chunks! */
- infof(data, "chunk reading DONE\n");
- keepon = FALSE;
- /* we did the full CONNECT treatment, go COMPLETE */
- conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;
- }
- else
- infof(data, "Read %zd bytes of chunk, continue\n",
- tookcareof);
+
+ /* now parse the chunked piece of data so that we can
+ properly tell when the stream ends */
+ r = Curl_httpchunk_read(conn, line_start + 1, 1, &gotbytes);
+ if(r == CHUNKE_STOP) {
+ /* we're done reading chunks! */
+ infof(data, "chunk reading DONE\n");
+ keepon = FALSE;
+ /* we did the full CONNECT treatment, go to
+ COMPLETE */
+ conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;
}
}
else {
- perline++; /* amount of bytes in this line so far */
- if(*ptr == 0x0a) {
- int writetype;
-
- /* convert from the network encoding */
- result = Curl_convert_from_network(data, line_start, perline);
- /* Curl_convert_from_network calls failf if unsuccessful */
- if(result)
- return result;
-
- /* output debug if that is requested */
- if(data->set.verbose)
- Curl_debug(data, CURLINFO_HEADER_IN,
- line_start, (size_t)perline, conn);
-
- /* send the header to the callback */
- writetype = CLIENTWRITE_HEADER;
- if(data->set.include_header)
- writetype |= CLIENTWRITE_BODY;
-
- result = Curl_client_write(conn, writetype, line_start,
- perline);
-
- data->info.header_size += (long)perline;
- data->req.headerbytecount += (long)perline;
-
- if(result)
- return result;
-
- /* Newlines are CRLF, so the CR is ignored as the line isn't
- really terminated until the LF comes. Treat a following CR
- as end-of-headers as well.*/
-
- if(('\r' == line_start[0]) ||
- ('\n' == line_start[0])) {
- /* end of response-headers from the proxy */
- nread = 0; /* make next read start over in the read
- buffer */
- ptr=data->state.buffer;
- if((407 == k->httpcode) && !data->state.authproblem) {
- /* If we get a 407 response code with content length
- when we have no auth problem, we must ignore the
- whole response-body */
- keepon = 2;
-
- if(cl) {
- infof(data, "Ignore %" CURL_FORMAT_CURL_OFF_T
- " bytes of response-body\n", cl);
- }
- else if(chunked_encoding) {
- CHUNKcode r;
- /* We set ignorebody true here since the chunked
- decoder function will acknowledge that. Pay
- attention so that this is cleared again when this
- function returns! */
- k->ignorebody = TRUE;
-
- if(line_start[1] == '\n') {
- /* this can only be a LF if the letter at index 0
- was a CR */
- line_start++;
- }
-
- /* now parse the chunked piece of data so that we can
- properly tell when the stream ends */
- r = Curl_httpchunk_read(conn, line_start+1, 1,
- &gotbytes);
- if(r == CHUNKE_STOP) {
- /* we're done reading chunks! */
- infof(data, "chunk reading DONE\n");
- keepon = FALSE;
- /* we did the full CONNECT treatment, go to
- COMPLETE */
- conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;
- }
- else
- infof(data, "Read %zd bytes of chunk, continue\n",
- gotbytes);
- }
- else {
- /* without content-length or chunked encoding, we
- can't keep the connection alive since the close is
- the end signal so we bail out at once instead */
- keepon=FALSE;
- }
- }
- else
- keepon = FALSE;
- /* we did the full CONNECT treatment, go to COMPLETE */
- conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;
- break; /* breaks out of for-loop, not switch() */
- }
-
- line_start[perline]=0; /* zero terminate the buffer */
- if((checkprefix("WWW-Authenticate:", line_start) &&
- (401 == k->httpcode)) ||
- (checkprefix("Proxy-authenticate:", line_start) &&
- (407 == k->httpcode))) {
-
- bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
- char *auth = Curl_copy_header_value(line_start);
- if(!auth)
- return CURLE_OUT_OF_MEMORY;
-
- result = Curl_http_input_auth(conn, proxy, auth);
-
- free(auth);
-
- if(result)
- return result;
- }
- else if(checkprefix("Content-Length:", line_start)) {
- if(k->httpcode/100 == 2) {
- /* A server MUST NOT send any Transfer-Encoding or
- Content-Length header fields in a 2xx (Successful)
- response to CONNECT. (RFC 7231 section 4.3.6) */
- failf(data, "Content-Length: in %03d response",
- k->httpcode);
- return CURLE_RECV_ERROR;
- }
-
- cl = curlx_strtoofft(line_start +
- strlen("Content-Length:"), NULL, 10);
- }
- else if(Curl_compareheader(line_start,
- "Connection:", "close"))
- closeConnection = TRUE;
- else if(Curl_compareheader(line_start,
- "Transfer-Encoding:",
- "chunked")) {
- if(k->httpcode/100 == 2) {
- /* A server MUST NOT send any Transfer-Encoding or
- Content-Length header fields in a 2xx (Successful)
- response to CONNECT. (RFC 7231 section 4.3.6) */
- failf(data, "Transfer-Encoding: in %03d response",
- k->httpcode);
- return CURLE_RECV_ERROR;
- }
- infof(data, "CONNECT responded chunked\n");
- chunked_encoding = TRUE;
- /* init our chunky engine */
- Curl_httpchunk_init(conn);
- }
- else if(Curl_compareheader(line_start,
- "Proxy-Connection:", "close"))
- closeConnection = TRUE;
- else if(2 == sscanf(line_start, "HTTP/1.%d %d",
- &subversion,
- &k->httpcode)) {
- /* store the HTTP code from the proxy */
- data->info.httpproxycode = k->httpcode;
- }
-
- perline=0; /* line starts over here */
- ptr = data->state.buffer;
- line_start = ptr;
- }
- else /* not end of header line */
- ptr++;
+ /* without content-length or chunked encoding, we
+ can't keep the connection alive since the close is
+ the end signal so we bail out at once instead */
+ keepon = FALSE;
}
}
- break;
- } /* switch */
- if(Curl_pgrsUpdate(conn))
- return CURLE_ABORTED_BY_CALLBACK;
+ else
+ keepon = FALSE;
+ /* we did the full CONNECT treatment, go to COMPLETE */
+ conn->tunnel_state[sockindex] = TUNNEL_COMPLETE;
+ continue;
+ }
+
+ line_start[perline] = 0; /* zero terminate the buffer */
+ if((checkprefix("WWW-Authenticate:", line_start) &&
+ (401 == k->httpcode)) ||
+ (checkprefix("Proxy-authenticate:", line_start) &&
+ (407 == k->httpcode))) {
+
+ bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
+ char *auth = Curl_copy_header_value(line_start);
+ if(!auth)
+ return CURLE_OUT_OF_MEMORY;
+
+ result = Curl_http_input_auth(conn, proxy, auth);
+
+ free(auth);
+
+ if(result)
+ return result;
+ }
+ else if(checkprefix("Content-Length:", line_start)) {
+ if(k->httpcode/100 == 2) {
+ /* A server MUST NOT send any Transfer-Encoding or
+ Content-Length header fields in a 2xx (Successful)
+ response to CONNECT. (RFC 7231 section 4.3.6) */
+ failf(data, "Content-Length: in %03d response",
+ k->httpcode);
+ return CURLE_RECV_ERROR;
+ }
+
+ cl = curlx_strtoofft(line_start +
+ strlen("Content-Length:"), NULL, 10);
+ }
+ else if(Curl_compareheader(line_start, "Connection:", "close"))
+ closeConnection = TRUE;
+ else if(Curl_compareheader(line_start,
+ "Transfer-Encoding:",
+ "chunked")) {
+ if(k->httpcode/100 == 2) {
+ /* A server MUST NOT send any Transfer-Encoding or
+ Content-Length header fields in a 2xx (Successful)
+ response to CONNECT. (RFC 7231 section 4.3.6) */
+ failf(data, "Transfer-Encoding: in %03d response", k->httpcode);
+ return CURLE_RECV_ERROR;
+ }
+ infof(data, "CONNECT responded chunked\n");
+ chunked_encoding = TRUE;
+ /* init our chunky engine */
+ Curl_httpchunk_init(conn);
+ }
+ else if(Curl_compareheader(line_start, "Proxy-Connection:", "close"))
+ closeConnection = TRUE;
+ else if(2 == sscanf(line_start, "HTTP/1.%d %d",
+ &subversion,
+ &k->httpcode)) {
+ /* store the HTTP code from the proxy */
+ data->info.httpproxycode = k->httpcode;
+ }
+
+ perline = 0; /* line starts over here */
+ ptr = data->state.buffer;
+ line_start = ptr;
} /* while there's buffer left and loop is requested */
+ if(Curl_pgrsUpdate(conn))
+ return CURLE_ABORTED_BY_CALLBACK;
+
if(error)
return CURLE_RECV_ERROR;
@@ -570,8 +568,7 @@ CURLcode Curl_proxyCONNECT(struct connectdata *conn,
if(conn->bits.close)
/* the connection has been marked for closure, most likely in the
Curl_http_auth_act() function and thus we can kill it at once
- below
- */
+ below */
closeConnection = TRUE;
}
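
The http_proxy.c hunk above flattens the old select-then-read switch into a read-first loop: attempt a one-byte read, and only wait on the socket when that read reports it would block (CURLE_AGAIN), so bytes already buffered by the TLS layer are never skipped over. A minimal standalone sketch of that control flow, using POSIX read()/poll() and a hypothetical drain_one_byte() helper instead of curl's Curl_read():

  #include <errno.h>
  #include <poll.h>
  #include <stdbool.h>
  #include <sys/types.h>
  #include <unistd.h>

  /* Read-first loop: try the read, and only block (briefly) when the
     socket really has nothing to give. Returns false on error or EOF. */
  static bool drain_one_byte(int fd, char *out)
  {
    for(;;) {
      ssize_t n = read(fd, out, 1);
      if(n == 1)
        return true;                     /* got a byte */
      if(n == 0)
        return false;                    /* peer closed the connection */
      if(errno == EAGAIN || errno == EWOULDBLOCK) {
        struct pollfd pfd;
        pfd.fd = fd;
        pfd.events = POLLIN;
        pfd.revents = 0;
        if(poll(&pfd, 1, 1000) == -1)    /* wait at most one second */
          return false;
        continue;                        /* readable or timed out: retry */
      }
      return false;                      /* hard read error */
    }
  }

Capping the wait at one second, as the patch does, also keeps the progress callback (Curl_pgrsUpdate) firing regularly while the CONNECT response trickles in.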
diff --git a/lib/sendf.c b/lib/sendf.c
index 29333e0e2..c3458a2b6 100644
--- a/lib/sendf.c
+++ b/lib/sendf.c
@@ -122,6 +122,13 @@ static size_t convert_lineends(struct Curl_easy *data,
#endif /* CURL_DO_LINEEND_CONV */
#ifdef USE_RECV_BEFORE_SEND_WORKAROUND
+bool Curl_recv_has_postponed_data(struct connectdata *conn, int sockindex)
+{
+ struct postponed_data * const psnd = &(conn->postponed[sockindex]);
+ return psnd->buffer && psnd->allocated_size &&
+ psnd->recv_size > psnd->recv_processed;
+}
+
static void pre_receive_plain(struct connectdata *conn, int num)
{
const curl_socket_t sockfd = conn->sock[num];
@@ -201,6 +208,10 @@ static ssize_t get_pre_recved(struct connectdata *conn, int num, char *buf,
}
#else /* ! USE_RECV_BEFORE_SEND_WORKAROUND */
/* Use "do-nothing" macros instead of functions when workaround not used */
+bool Curl_recv_has_postponed_data(struct connectdata *conn, int sockindex)
+{
+ return false;
+}
#define pre_receive_plain(c,n) do {} WHILE_FALSE
#define get_pre_recved(c,n,b,l) 0
#endif /* ! USE_RECV_BEFORE_SEND_WORKAROUND */
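
The sendf.c hunk above reports whether the recv-before-send workaround buffer still holds unconsumed bytes; the bookkeeping is simply comparing how much was received into the postponed buffer with how much has already been handed back to the caller. A small standalone sketch with a hypothetical pending_buf type standing in for curl's struct postponed_data:

  #include <stdbool.h>
  #include <stddef.h>

  /* Hypothetical stand-in for the postponed-data bookkeeping */
  struct pending_buf {
    char *buffer;           /* NULL when nothing was ever postponed */
    size_t allocated_size;  /* capacity of buffer */
    size_t recv_size;       /* bytes received into buffer */
    size_t recv_processed;  /* bytes already consumed by the caller */
  };

  /* True if the buffer exists and still holds unread bytes */
  static bool has_postponed_data(const struct pending_buf *p)
  {
    return p->buffer && p->allocated_size &&
           p->recv_size > p->recv_processed;
  }

When the workaround is compiled out, the patch supplies a stub that always returns false, so Curl_conn_data_pending() can call it unconditionally.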
diff --git a/lib/sendf.h b/lib/sendf.h
index a951a0b4f..fbe4f99c8 100644
--- a/lib/sendf.h
+++ b/lib/sendf.h
@@ -56,6 +56,8 @@ CURLcode Curl_client_chop_write(struct connectdata *conn, int type, char *ptr,
CURLcode Curl_client_write(struct connectdata *conn, int type, char *ptr,
size_t len) WARN_UNUSED_RESULT;
+bool Curl_recv_has_postponed_data(struct connectdata *conn, int sockindex);
+
/* internal read-function, does plain socket only */
CURLcode Curl_read_plain(curl_socket_t sockfd,
char *buf,