 lib/http.c     | 756 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/http.h     |  12 ++
 lib/transfer.c | 770 +-----------------------------------------------------
 3 files changed, 769 insertions(+), 769 deletions(-)
diff --git a/lib/http.c b/lib/http.c
index 6ac2d7331..76738b880 100644
--- a/lib/http.c
+++ b/lib/http.c
@@ -2985,4 +2985,760 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
return result;
}
+
+/*
+ * checkhttpprefix()
+ *
+ * Returns TRUE if a member of the list matches the prefix of the string
+ */
+static bool
+checkhttpprefix(struct SessionHandle *data,
+ const char *s)
+{
+ struct curl_slist *head = data->set.http200aliases;
+ bool rc = FALSE;
+#ifdef CURL_DOES_CONVERSIONS
+ /* convert from the network encoding using a scratch area */
+ char *scratch = calloc(1, strlen(s)+1);
+ if(NULL == scratch) {
+ failf (data, "Failed to calloc memory for conversion!");
+ return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
+ }
+ strcpy(scratch, s);
+ if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
+ /* Curl_convert_from_network calls failf if unsuccessful */
+ free(scratch);
+ return FALSE; /* can't return CURLE_foobar so return FALSE */
+ }
+ s = scratch;
+#endif /* CURL_DOES_CONVERSIONS */
+
+ while(head) {
+ if(checkprefix(head->data, s)) {
+ rc = TRUE;
+ break;
+ }
+ head = head->next;
+ }
+
+ if((rc != TRUE) && (checkprefix("HTTP/", s))) {
+ rc = TRUE;
+ }
+
+#ifdef CURL_DOES_CONVERSIONS
+ free(scratch);
+#endif /* CURL_DOES_CONVERSIONS */
+ return rc;
+}
+
+/*
+ * header_append() copies a chunk of data to the end of the already received
+ * header. We make sure that the full string fits in the allocated header
+ * buffer, or else we enlarge it.
+ */
+static CURLcode header_append(struct SessionHandle *data,
+ struct SingleRequest *k,
+ size_t length)
+{
+ if(k->hbuflen + length >= data->state.headersize) {
+ /* We enlarge the header buffer as it is too small */
+ char *newbuff;
+ size_t hbufp_index;
+ size_t newsize;
+
+ if(k->hbuflen + length > CURL_MAX_HTTP_HEADER) {
+      /* The reason to have a max limit for this is to avoid the risk of a
+         bad server feeding libcurl with a never-ending header that would
+         cause infinite reallocs */
+ failf (data, "Avoided giant realloc for header (max is %d)!",
+ CURL_MAX_HTTP_HEADER);
+ return CURLE_OUT_OF_MEMORY;
+ }
+
+    newsize=CURLMAX((k->hbuflen + length)*3/2, data->state.headersize*2);
+ hbufp_index = k->hbufp - data->state.headerbuff;
+ newbuff = realloc(data->state.headerbuff, newsize);
+ if(!newbuff) {
+ failf (data, "Failed to alloc memory for big header!");
+ return CURLE_OUT_OF_MEMORY;
+ }
+ data->state.headersize=newsize;
+ data->state.headerbuff = newbuff;
+ k->hbufp = data->state.headerbuff + hbufp_index;
+ }
+ memcpy(k->hbufp, k->str_start, length);
+ k->hbufp += length;
+ k->hbuflen += length;
+ *k->hbufp = 0;
+
+ return CURLE_OK;
+}
+
+
+/*
+ * Read any HTTP header lines from the server and pass them to the client app.
+ */
+CURLcode Curl_http_readwrite_headers(struct SessionHandle *data,
+ struct connectdata *conn,
+ struct SingleRequest *k,
+ ssize_t *nread,
+ bool *stop_reading)
+{
+ CURLcode result;
+
+ /* header line within buffer loop */
+ do {
+ size_t rest_length;
+ size_t full_length;
+ int writetype;
+
+ /* str_start is start of line within buf */
+ k->str_start = k->str;
+
+ /* data is in network encoding so use 0x0a instead of '\n' */
+ k->end_ptr = memchr(k->str_start, 0x0a, *nread);
+
+ if(!k->end_ptr) {
+ /* Not a complete header line within buffer, append the data to
+ the end of the headerbuff. */
+ result = header_append(data, k, *nread);
+ if(result)
+ return result;
+
+ if(!k->headerline && (k->hbuflen>5)) {
+        /* make a first check that this looks like an HTTP header */
+        if(!checkhttpprefix(data, data->state.headerbuff)) {
+          /* this is not the beginning of an HTTP first header line */
+ k->header = FALSE;
+ k->badheader = HEADER_ALLBAD;
+ break;
+ }
+ }
+
+ break; /* read more and try again */
+ }
+
+ /* decrease the size of the remaining (supposed) header line */
+ rest_length = (k->end_ptr - k->str)+1;
+ *nread -= (ssize_t)rest_length;
+
+ k->str = k->end_ptr + 1; /* move past new line */
+
+ full_length = k->str - k->str_start;
+
+ result = header_append(data, k, full_length);
+ if(result)
+ return result;
+
+ k->end_ptr = k->hbufp;
+ k->p = data->state.headerbuff;
+
+ /****
+ * We now have a FULL header line that p points to
+ *****/
+
+ if(!k->headerline) {
+ /* the first read header */
+ if((k->hbuflen>5) &&
+ !checkhttpprefix(data, data->state.headerbuff)) {
+        /* this is not the beginning of an HTTP first header line */
+ k->header = FALSE;
+ if(*nread)
+ /* since there's more, this is a partial bad header */
+ k->badheader = HEADER_PARTHEADER;
+ else {
+ /* this was all we read so it's all a bad header */
+ k->badheader = HEADER_ALLBAD;
+ *nread = (ssize_t)rest_length;
+ }
+ break;
+ }
+ }
+
+ /* headers are in network encoding so
+ use 0x0a and 0x0d instead of '\n' and '\r' */
+ if((0x0a == *k->p) || (0x0d == *k->p)) {
+ size_t headerlen;
+ /* Zero-length header line means end of headers! */
+
+#ifdef CURL_DOES_CONVERSIONS
+ if(0x0d == *k->p) {
+ *k->p = '\r'; /* replace with CR in host encoding */
+ k->p++; /* pass the CR byte */
+ }
+ if(0x0a == *k->p) {
+ *k->p = '\n'; /* replace with LF in host encoding */
+ k->p++; /* pass the LF byte */
+ }
+#else
+ if('\r' == *k->p)
+ k->p++; /* pass the \r byte */
+ if('\n' == *k->p)
+ k->p++; /* pass the \n byte */
+#endif /* CURL_DOES_CONVERSIONS */
+
+ if(100 <= k->httpcode && 199 >= k->httpcode) {
+ /*
+       * We have made an HTTP PUT or POST and this is 1.1-lingo
+ * that tells us that the server is OK with this and ready
+ * to receive the data.
+ * However, we'll get more headers now so we must get
+ * back into the header-parsing state!
+ */
+ k->header = TRUE;
+ k->headerline = 0; /* restart the header line counter */
+
+        /* if we were waiting for this, enable writing now! */
+ if(k->exp100) {
+ k->exp100 = EXP100_SEND_DATA;
+ k->keepon |= KEEP_SEND;
+ }
+ }
+ else {
+ k->header = FALSE; /* no more header to parse! */
+
+ if((k->size == -1) && !k->chunk && !conn->bits.close &&
+ (conn->httpversion >= 11) ) {
+          /* In HTTP 1.1, when the connection is not to be closed, but
+             neither a Content-Length nor a chunked Transfer-Encoding
+             has been received, then according to RFC2616 section 4.4
+             point 5 we assume that the server will close the connection
+             to signal the end of the document. */
+ infof(data, "no chunk, no close, no size. Assume close to "
+ "signal end\n");
+ conn->bits.close = TRUE;
+ }
+ }
+
+ if(417 == k->httpcode) {
+ /*
+         * We got "417 Expectation Failed", which means that
+         * we made an HTTP call and our Expect: header
+         * seems to cause a problem => abort the write operations
+ * (or prevent them from starting).
+ */
+ k->exp100 = EXP100_FAILED;
+ k->keepon &= ~KEEP_SEND;
+ }
+
+ /*
+ * When all the headers have been parsed, see if we should give
+ * up and return an error.
+ */
+ if(Curl_http_should_fail(conn)) {
+ failf (data, "The requested URL returned error: %d",
+ k->httpcode);
+ return CURLE_HTTP_RETURNED_ERROR;
+ }
+
+      /* pass this header on to the client as header data; if the app
+         asked for headers included in the body, flag it as body too: */
+ writetype = CLIENTWRITE_HEADER;
+ if(data->set.include_header)
+ writetype |= CLIENTWRITE_BODY;
+
+ headerlen = k->p - data->state.headerbuff;
+
+ result = Curl_client_write(conn, writetype,
+ data->state.headerbuff,
+ headerlen);
+ if(result)
+ return result;
+
+ data->info.header_size += (long)headerlen;
+ data->req.headerbytecount += (long)headerlen;
+
+ data->req.deductheadercount =
+ (100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
+
+ if(data->state.resume_from &&
+ (data->set.httpreq==HTTPREQ_GET) &&
+ (k->httpcode == 416)) {
+ /* "Requested Range Not Satisfiable" */
+ *stop_reading = TRUE;
+ }
+
+ if(!*stop_reading) {
+ /* Curl_http_auth_act() checks what authentication methods
+ * that are available and decides which one (if any) to
+ * use. It will set 'newurl' if an auth method was picked. */
+ result = Curl_http_auth_act(conn);
+
+ if(result)
+ return result;
+
+ if(conn->bits.rewindaftersend) {
+          /* We rewind after a complete send, so we continue
+             sending now */
+ infof(data, "Keep sending data to get tossed away!\n");
+ k->keepon |= KEEP_SEND;
+ }
+ }
+
+ if(!k->header) {
+ /*
+ * really end-of-headers.
+ *
+ * If we requested a "no body", this is a good time to get
+ * out and return home.
+ */
+ if(data->set.opt_no_body)
+ *stop_reading = TRUE;
+ else {
+          /* If we know the expected size of this document, we set the
+             maximum download size to the size of the expected
+             document, or else we won't know when to stop reading!
+
+ Note that we set the download maximum even if we read a
+ "Connection: close" header, to make sure that
+ "Content-Length: 0" still prevents us from attempting to
+ read the (missing) response-body.
+ */
+ /* According to RFC2616 section 4.4, we MUST ignore
+ Content-Length: headers if we are now receiving data
+ using chunked Transfer-Encoding.
+ */
+ if(k->chunk)
+ k->size=-1;
+
+ }
+ if(-1 != k->size) {
+ /* We do this operation even if no_body is true, since this
+ data might be retrieved later with curl_easy_getinfo()
+ and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
+
+ Curl_pgrsSetDownloadSize(data, k->size);
+ k->maxdownload = k->size;
+ }
+ /* If max download size is *zero* (nothing) we already
+ have nothing and can safely return ok now! */
+ if(0 == k->maxdownload)
+ *stop_reading = TRUE;
+
+ if(*stop_reading) {
+ /* we make sure that this socket isn't read more now */
+ k->keepon &= ~KEEP_RECV;
+ }
+
+ if(data->set.verbose)
+ Curl_debug(data, CURLINFO_HEADER_IN,
+ k->str_start, headerlen, conn);
+ break; /* exit header line loop */
+ }
+
+ /* We continue reading headers, so reset the line-based
+ header parsing variables hbufp && hbuflen */
+ k->hbufp = data->state.headerbuff;
+ k->hbuflen = 0;
+ continue;
+ }
+
+ /*
+ * Checks for special headers coming up.
+ */
+
+ if(!k->headerline++) {
+      /* This is the first header; it MUST be the status line
+         or else we consider this to be the body right away! */
+ int httpversion_major;
+ int nc;
+#ifdef CURL_DOES_CONVERSIONS
+#define HEADER1 scratch
+#define SCRATCHSIZE 21
+ CURLcode res;
+ char scratch[SCRATCHSIZE+1]; /* "HTTP/major.minor 123" */
+ /* We can't really convert this yet because we
+ don't know if it's the 1st header line or the body.
+ So we do a partial conversion into a scratch area,
+ leaving the data at k->p as-is.
+ */
+ strncpy(&scratch[0], k->p, SCRATCHSIZE);
+ scratch[SCRATCHSIZE] = 0; /* null terminate */
+ res = Curl_convert_from_network(data,
+ &scratch[0],
+ SCRATCHSIZE);
+ if(CURLE_OK != res) {
+ /* Curl_convert_from_network calls failf if unsuccessful */
+ return res;
+ }
+#else
+#define HEADER1 k->p /* no conversion needed, just use k->p */
+#endif /* CURL_DOES_CONVERSIONS */
+
+ nc = sscanf(HEADER1,
+ " HTTP/%d.%d %3d",
+ &httpversion_major,
+ &conn->httpversion,
+ &k->httpcode);
+ if(nc==3) {
+ conn->httpversion += 10 * httpversion_major;
+ }
+ else {
+      /* this is the real world, not Nirvana:
+         NCSA 1.5.x returns this crap when asked for HTTP/1.1
+ */
+ nc=sscanf(HEADER1, " HTTP %3d", &k->httpcode);
+ conn->httpversion = 10;
+
+ /* If user has set option HTTP200ALIASES,
+ compare header line against list of aliases
+ */
+ if(!nc) {
+ if(checkhttpprefix(data, k->p)) {
+ nc = 1;
+ k->httpcode = 200;
+ conn->httpversion = 10;
+ }
+ }
+ }
+
+ if(nc) {
+ data->info.httpcode = k->httpcode;
+ data->info.httpversion = conn->httpversion;
+ if (!data->state.httpversion ||
+ data->state.httpversion > conn->httpversion)
+ /* store the lowest server version we encounter */
+ data->state.httpversion = conn->httpversion;
+
+ /*
+ * This code executes as part of processing the header. As a
+ * result, it's not totally clear how to interpret the
+ * response code yet as that depends on what other headers may
+ * be present. 401 and 407 may be errors, but may be OK
+ * depending on how authentication is working. Other codes
+ * are definitely errors, so give up here.
+ */
+ if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
+ ((k->httpcode != 401) || !conn->bits.user_passwd) &&
+ ((k->httpcode != 407) || !conn->bits.proxy_user_passwd) ) {
+
+ if(data->state.resume_from &&
+ (data->set.httpreq==HTTPREQ_GET) &&
+ (k->httpcode == 416)) {
+ /* "Requested Range Not Satisfiable", just proceed and
+ pretend this is no error */
+ }
+ else {
+ /* serious error, go home! */
+ failf (data, "The requested URL returned error: %d",
+ k->httpcode);
+ return CURLE_HTTP_RETURNED_ERROR;
+ }
+ }
+
+ if(conn->httpversion == 10) {
+ /* Default action for HTTP/1.0 must be to close, unless
+ we get one of those fancy headers that tell us the
+ server keeps it open for us! */
+ infof(data, "HTTP 1.0, assume close after body\n");
+ conn->bits.close = TRUE;
+ }
+ else if(conn->httpversion >= 11 &&
+ !conn->bits.close) {
+          /* If the HTTP version is >= 1.1 and the connection is
+             persistent, the server supports pipelining. */
+ DEBUGF(infof(data,
+ "HTTP 1.1 or later with persistent connection, "
+ "pipelining supported\n"));
+ conn->server_supports_pipelining = TRUE;
+ }
+
+ switch(k->httpcode) {
+ case 204:
+ /* (quote from RFC2616, section 10.2.5): The server has
+ * fulfilled the request but does not need to return an
+ * entity-body ... The 204 response MUST NOT include a
+ * message-body, and thus is always terminated by the first
+ * empty line after the header fields. */
+ /* FALLTHROUGH */
+        case 416: /* Requested Range Not Satisfiable, it has the
+                     Content-Length: set to the "real" document size but
+                     no actual body is sent. */
+ case 304:
+ /* (quote from RFC2616, section 10.3.5): The 304 response
+ * MUST NOT contain a message-body, and thus is always
+ * terminated by the first empty line after the header
+ * fields. */
+ if(data->set.timecondition)
+ data->info.timecond = TRUE;
+ k->size=0;
+ k->maxdownload=0;
+ k->ignorecl = TRUE; /* ignore Content-Length headers */
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+ }
+ else {
+ k->header = FALSE; /* this is not a header line */
+ break;
+ }
+ }
+
+#ifdef CURL_DOES_CONVERSIONS
+ /* convert from the network encoding */
+ result = Curl_convert_from_network(data, k->p, strlen(k->p));
+ if(CURLE_OK != result) {
+ return(result);
+ }
+ /* Curl_convert_from_network calls failf if unsuccessful */
+#endif /* CURL_DOES_CONVERSIONS */
+
+    /* Check for Content-Length: header lines to get size. Ignore
+       the header completely if we get a 416 response, as then we're
+       resuming a document that we won't get, and this header contains
+       info about the true size of that document. */
+ if(!k->ignorecl && !data->set.ignorecl &&
+ checkprefix("Content-Length:", k->p)) {
+ curl_off_t contentlength = curlx_strtoofft(k->p+15, NULL, 10);
+ if(data->set.max_filesize &&
+ contentlength > data->set.max_filesize) {
+ failf(data, "Maximum file size exceeded");
+ return CURLE_FILESIZE_EXCEEDED;
+ }
+ if(contentlength >= 0) {
+ k->size = contentlength;
+ k->maxdownload = k->size;
+ /* we set the progress download size already at this point
+ just to make it easier for apps/callbacks to extract this
+ info as soon as possible */
+ Curl_pgrsSetDownloadSize(data, k->size);
+ }
+ else {
+ /* Negative Content-Length is really odd, and we know it
+ happens for example when older Apache servers send large
+ files */
+ conn->bits.close = TRUE;
+ infof(data, "Negative content-length: %" FORMAT_OFF_T
+ ", closing after transfer\n", contentlength);
+ }
+ }
+ /* check for Content-Type: header lines to get the MIME-type */
+ else if(checkprefix("Content-Type:", k->p)) {
+ char *contenttype = Curl_copy_header_value(k->p);
+ if (!contenttype)
+ return CURLE_OUT_OF_MEMORY;
+ if (!*contenttype)
+ /* ignore empty data */
+ free(contenttype);
+ else {
+ Curl_safefree(data->info.contenttype);
+ data->info.contenttype = contenttype;
+ }
+ }
+ else if((conn->httpversion == 10) &&
+ conn->bits.httpproxy &&
+ Curl_compareheader(k->p,
+ "Proxy-Connection:", "keep-alive")) {
+ /*
+       * When an HTTP/1.0 reply arrives while using a proxy, the
+ * 'Proxy-Connection: keep-alive' line tells us the
+ * connection will be kept alive for our pleasure.
+ * Default action for 1.0 is to close.
+ */
+ conn->bits.close = FALSE; /* don't close when done */
+ infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
+ }
+ else if((conn->httpversion == 11) &&
+ conn->bits.httpproxy &&
+ Curl_compareheader(k->p,
+ "Proxy-Connection:", "close")) {
+ /*
+       * We get an HTTP/1.1 response from a proxy and it says it'll
+ * close down after this transfer.
+ */
+ conn->bits.close = TRUE; /* close when done */
+ infof(data, "HTTP/1.1 proxy connection set close!\n");
+ }
+ else if((conn->httpversion == 10) &&
+ Curl_compareheader(k->p, "Connection:", "keep-alive")) {
+ /*
+       * An HTTP/1.0 reply with the 'Connection: keep-alive' line
+ * tells us the connection will be kept alive for our
+ * pleasure. Default action for 1.0 is to close.
+ *
+ * [RFC2068, section 19.7.1] */
+ conn->bits.close = FALSE; /* don't close when done */
+ infof(data, "HTTP/1.0 connection set to keep alive!\n");
+ }
+ else if(Curl_compareheader(k->p, "Connection:", "close")) {
+ /*
+ * [RFC 2616, section 8.1.2.1]
+ * "Connection: close" is HTTP/1.1 language and means that
+ * the connection will close when this request has been
+ * served.
+ */
+ conn->bits.close = TRUE; /* close when done */
+ }
+ else if(Curl_compareheader(k->p,
+ "Transfer-Encoding:", "chunked")) {
+ /*
+ * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
+ * means that the server will send a series of "chunks". Each
+       * chunk starts with a CRLF-terminated line with info (including
+       * the size of the coming block), then a block of data
+       * with the previously mentioned size. There can be any number
+       * of chunks, and a chunk size of zero signals the
+ * end-of-chunks. */
+ k->chunk = TRUE; /* chunks coming our way */
+
+ /* init our chunky engine */
+ Curl_httpchunk_init(conn);
+ }
+
+ else if(checkprefix("Trailer:", k->p) ||
+ checkprefix("Trailers:", k->p)) {
+ /*
+       * This test helps Curl_httpchunk_read() to determine whether to look
+       * for well-formed trailers after the zero chunksize record. In
+ * this case a CRLF is required after the zero chunksize record
+ * when no trailers are sent, or after the last trailer record.
+ *
+ * It seems both Trailer: and Trailers: occur in the wild.
+ */
+ k->trailerhdrpresent = TRUE;
+ }
+
+ else if(checkprefix("Content-Encoding:", k->p) &&
+ data->set.str[STRING_ENCODING]) {
+ /*
+ * Process Content-Encoding. Look for the values: identity,
+ * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
+ * x-compress are the same as gzip and compress. (Sec 3.5 RFC
+ * 2616). zlib cannot handle compress. However, errors are
+ * handled further down when the response body is processed
+ */
+ char *start;
+
+      /* Find the first non-space character */
+ start = k->p + 17;
+ while(*start && ISSPACE(*start))
+ start++;
+
+ /* Record the content-encoding for later use */
+ if(checkprefix("identity", start))
+ k->content_encoding = IDENTITY;
+ else if(checkprefix("deflate", start))
+ k->content_encoding = DEFLATE;
+ else if(checkprefix("gzip", start)
+ || checkprefix("x-gzip", start))
+ k->content_encoding = GZIP;
+ else if(checkprefix("compress", start)
+ || checkprefix("x-compress", start))
+ k->content_encoding = COMPRESS;
+ }
+ else if(checkprefix("Content-Range:", k->p)) {
+ /* Content-Range: bytes [num]-
+ Content-Range: bytes: [num]-
+ Content-Range: [num]-
+
+ The second format was added since Sun's webserver
+ JavaWebServer/1.1.1 obviously sends the header this way!
+         The third was added since some servers use that!
+ */
+
+ char *ptr = k->p + 14;
+
+ /* Move forward until first digit */
+ while(*ptr && !ISDIGIT(*ptr))
+ ptr++;
+
+ k->offset = curlx_strtoofft(ptr, NULL, 10);
+
+ if(data->state.resume_from == k->offset)
+ /* we asked for a resume and we got it */
+ k->content_range = TRUE;
+ }
+#if !defined(CURL_DISABLE_COOKIES)
+ else if(data->cookies &&
+ checkprefix("Set-Cookie:", k->p)) {
+ Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
+ CURL_LOCK_ACCESS_SINGLE);
+ Curl_cookie_add(data,
+ data->cookies, TRUE, k->p+11,
+ /* If there is a custom-set Host: name, use it
+ here, or else use real peer host name. */
+ conn->allocptr.cookiehost?
+ conn->allocptr.cookiehost:conn->host.name,
+ data->state.path);
+ Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
+ }
+#endif
+ else if(checkprefix("Last-Modified:", k->p) &&
+ (data->set.timecondition || data->set.get_filetime) ) {
+ time_t secs=time(NULL);
+ k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
+ &secs);
+ if(data->set.get_filetime)
+ data->info.filetime = (long)k->timeofdoc;
+ }
+ else if((checkprefix("WWW-Authenticate:", k->p) &&
+ (401 == k->httpcode)) ||
+ (checkprefix("Proxy-authenticate:", k->p) &&
+ (407 == k->httpcode))) {
+ result = Curl_http_input_auth(conn, k->httpcode, k->p);
+ if(result)
+ return result;
+ }
+ else if((k->httpcode >= 300 && k->httpcode < 400) &&
+ checkprefix("Location:", k->p)) {
+ /* this is the URL that the server advises us to use instead */
+ char *location = Curl_copy_header_value(k->p);
+ if (!location)
+ return CURLE_OUT_OF_MEMORY;
+ if (!*location)
+ /* ignore empty data */
+ free(location);
+ else {
+ DEBUGASSERT(!data->req.location);
+ data->req.location = location;
+
+ if(data->set.http_follow_location) {
+ DEBUGASSERT(!data->req.newurl);
+ data->req.newurl = strdup(data->req.location); /* clone */
+ if(!data->req.newurl)
+ return CURLE_OUT_OF_MEMORY;
+
+          /* some cases of POST and PUT etc need to rewind the data
+ stream at this point */
+ result = Curl_http_perhapsrewind(conn);
+ if(result)
+ return result;
+ }
+ }
+ }
+
+ /*
+ * End of header-checks. Write them to the client.
+ */
+
+ writetype = CLIENTWRITE_HEADER;
+ if(data->set.include_header)
+ writetype |= CLIENTWRITE_BODY;
+
+ if(data->set.verbose)
+ Curl_debug(data, CURLINFO_HEADER_IN,
+ k->p, (size_t)k->hbuflen, conn);
+
+ result = Curl_client_write(conn, writetype, k->p, k->hbuflen);
+ if(result)
+ return result;
+
+ data->info.header_size += (long)k->hbuflen;
+ data->req.headerbytecount += (long)k->hbuflen;
+
+ /* reset hbufp pointer && hbuflen */
+ k->hbufp = data->state.headerbuff;
+ k->hbuflen = 0;
+ }
+ while(!*stop_reading && *k->str); /* header line within buffer */
+
+ /* We might have reached the end of the header part here, but
     there might be a non-header part left at the end of the read
+ buffer. */
+
+ return CURLE_OK;
+}
+
#endif
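
A note on the growth policy in header_append() above: the buffer grows
geometrically and is hard-capped at CURL_MAX_HTTP_HEADER. A minimal
standalone sketch of that policy follows; grow_buffer, MAX_HEADER and MAX2
are illustrative names, not libcurl API.

#include <stdlib.h>

#define MAX_HEADER 102400  /* stand-in for CURL_MAX_HTTP_HEADER */
#define MAX2(a,b) ((a) > (b) ? (a) : (b))

/* make sure *buf can hold used+add bytes plus a terminator; returns 0 on
   success, -1 on refusal or allocation failure */
static int grow_buffer(char **buf, size_t *size, size_t used, size_t add)
{
  if(used + add >= *size) {
    char *newbuf;
    size_t newsize;
    if(used + add > MAX_HEADER)
      return -1; /* refuse the giant realloc */
    /* grow to the larger of 1.5x the needed size and 2x the current
       size, keeping the number of reallocs logarithmic */
    newsize = MAX2((used + add) * 3 / 2, *size * 2);
    newbuf = realloc(*buf, newsize);
    if(!newbuf)
      return -1;
    *buf = newbuf;
    *size = newsize;
  }
  return 0;
}
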
diff --git a/lib/http.h b/lib/http.h
index 8908130f9..263b8989c 100644
--- a/lib/http.h
+++ b/lib/http.h
@@ -25,6 +25,8 @@
***************************************************************************/
#ifndef CURL_DISABLE_HTTP
+struct SingleRequest;
+
extern const struct Curl_handler Curl_handler_http;
#ifdef USE_SSL
@@ -123,4 +125,14 @@ struct HTTP {
points to an allocated send_buffer struct */
};
+CURLcode Curl_http_header_append(struct SessionHandle *data,
+ struct SingleRequest *k,
+ size_t length);
+
+CURLcode Curl_http_readwrite_headers(struct SessionHandle *data,
+ struct connectdata *conn,
+ struct SingleRequest *k,
+ ssize_t *nread,
+ bool *stop_reading);
+
#endif
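
The newly exported Curl_http_readwrite_headers() starts by validating the
status line. A minimal sketch of that parse follows, including the fallback
for version-less responses from ancient servers; parse_status is an
illustrative name, not libcurl API.

#include <stdio.h>

static int parse_status(const char *line, int *httpversion, int *httpcode)
{
  int major, minor;
  /* the normal case: "HTTP/major.minor code" */
  if(3 == sscanf(line, " HTTP/%d.%d %3d", &major, &minor, httpcode)) {
    *httpversion = 10 * major + minor;
    return 1;
  }
  /* some old servers (like NCSA 1.5.x) omit the version entirely */
  if(1 == sscanf(line, " HTTP %3d", httpcode)) {
    *httpversion = 10; /* treat as HTTP/1.0 */
    return 1;
  }
  return 0; /* not a status line, presumably body data */
}

int main(void)
{
  int ver, code;
  if(parse_status("HTTP/1.1 200 OK", &ver, &code))
    printf("version %d, code %d\n", ver, code); /* version 11, code 200 */
  return 0;
}
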
diff --git a/lib/transfer.c b/lib/transfer.c
index 820d6816e..369481b18 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -112,15 +112,6 @@
#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
-
-#ifndef CURL_DISABLE_HTTP
-static CURLcode readwrite_http_headers(struct SessionHandle *data,
- struct connectdata *conn,
- struct SingleRequest *k,
- ssize_t *nread,
- bool *stop_reading);
-#endif /* CURL_DISABLE_HTTP */
-
/*
* This function will call the read callback to fill our buffer with data
* to upload.
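
The Content-Length handling moved by this patch enforces CURLOPT_MAXFILESIZE
and tolerates the negative lengths some old servers emit. A compact sketch,
with long standing in for curl_off_t; handle_content_length is an
illustrative name.

#include <stdlib.h>

/* returns 0 on success, -1 when the advertised size exceeds the cap
   (libcurl returns CURLE_FILESIZE_EXCEEDED there) */
static int handle_content_length(const char *value, long max_filesize,
                                 long *size, int *close_after)
{
  long len = strtol(value, NULL, 10);
  if(max_filesize && len > max_filesize)
    return -1;
  if(len >= 0)
    *size = len;
  else {
    *size = -1;       /* negative Content-Length: size unknown... */
    *close_after = 1; /* ...so read until the server closes */
  }
  return 0;
}
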
@@ -261,52 +252,6 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
return CURLE_OK;
}
-#ifndef CURL_DISABLE_HTTP
-/*
- * checkhttpprefix()
- *
- * Returns TRUE if member of the list matches prefix of string
- */
-static bool
-checkhttpprefix(struct SessionHandle *data,
- const char *s)
-{
- struct curl_slist *head = data->set.http200aliases;
- bool rc = FALSE;
-#ifdef CURL_DOES_CONVERSIONS
- /* convert from the network encoding using a scratch area */
- char *scratch = calloc(1, strlen(s)+1);
- if(NULL == scratch) {
- failf (data, "Failed to calloc memory for conversion!");
- return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
- }
- strcpy(scratch, s);
- if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
- /* Curl_convert_from_network calls failf if unsuccessful */
- free(scratch);
- return FALSE; /* can't return CURLE_foobar so return FALSE */
- }
- s = scratch;
-#endif /* CURL_DOES_CONVERSIONS */
-
- while(head) {
- if(checkprefix(head->data, s)) {
- rc = TRUE;
- break;
- }
- head = head->next;
- }
-
- if((rc != TRUE) && (checkprefix("HTTP/", s))) {
- rc = TRUE;
- }
-
-#ifdef CURL_DOES_CONVERSIONS
- free(scratch);
-#endif /* CURL_DOES_CONVERSIONS */
- return rc;
-}
-#endif /* CURL_DISABLE_HTTP */
/*
* Curl_readrewind() rewinds the read stream. This is typically used for HTTP
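
The checkhttpprefix() logic removed here (and re-added in lib/http.c above)
walks the CURLOPT_HTTP200ALIASES list before falling back to the standard
"HTTP/" prefix. A self-contained sketch of that matching; struct alias and
looks_like_http are illustrative, the real code uses struct curl_slist.

#include <string.h>

struct alias { const char *data; struct alias *next; };

static int has_prefix(const char *prefix, const char *s)
{
  return 0 == strncmp(prefix, s, strlen(prefix));
}

/* TRUE if s starts like an HTTP status line or matches one of the
   user-supplied aliases */
static int looks_like_http(const struct alias *head, const char *s)
{
  for(; head; head = head->next)
    if(has_prefix(head->data, s))
      return 1;
  return has_prefix("HTTP/", s);
}
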
@@ -496,7 +441,7 @@ static CURLcode readwrite_data(struct SessionHandle *data,
if(k->header) {
/* we are in parse-the-header-mode */
bool stop_reading = FALSE;
- result = readwrite_http_headers(data, conn, k, &nread, &stop_reading);
+ result = Curl_http_readwrite_headers(data, conn, k, &nread, &stop_reading);
if(result)
return result;
if(stop_reading)
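
The function called above consumes the read buffer line by line, scanning
for the network-encoded 0x0a byte and keeping any partial tail for the next
read. A standalone sketch of that splitting loop; split_lines is an
illustrative name.

#include <stdio.h>
#include <string.h>

static void split_lines(const char *buf, size_t nread)
{
  const char *str = buf;
  while(nread) {
    const char *end = memchr(str, 0x0a, nread);
    size_t len;
    if(!end) {
      /* no complete line left; stash this and wait for more data */
      printf("partial line: %.*s\n", (int)nread, str);
      break;
    }
    len = (size_t)(end - str) + 1; /* include the 0x0a byte */
    printf("full line: %.*s", (int)len, str);
    nread -= len;
    str = end + 1; /* move past the newline */
  }
}
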
@@ -754,719 +699,6 @@ static CURLcode readwrite_data(struct SessionHandle *data,
return CURLE_OK;
}
-#ifndef CURL_DISABLE_HTTP
-
-/*
- * header_append() copies a chunk of data to the end of the already received
- * header. We make sure that the full string fit in the allocated header
- * buffer, or else we enlarge it.
- */
-static CURLcode header_append(struct SessionHandle *data,
- struct SingleRequest *k,
- size_t length)
-{
- if(k->hbuflen + length >= data->state.headersize) {
- /* We enlarge the header buffer as it is too small */
- char *newbuff;
- size_t hbufp_index;
- size_t newsize;
-
- if(k->hbuflen + length > CURL_MAX_HTTP_HEADER) {
- /* The reason to have a max limit for this is to avoid the risk of a bad
- server feeding libcurl with a never-ending header that will cause
- reallocs infinitely */
- failf (data, "Avoided giant realloc for header (max is %d)!",
- CURL_MAX_HTTP_HEADER);
- return CURLE_OUT_OF_MEMORY;
- }
-
- newsize=CURLMAX((k->hbuflen+ length)*3/2, data->state.headersize*2);
- hbufp_index = k->hbufp - data->state.headerbuff;
- newbuff = realloc(data->state.headerbuff, newsize);
- if(!newbuff) {
- failf (data, "Failed to alloc memory for big header!");
- return CURLE_OUT_OF_MEMORY;
- }
- data->state.headersize=newsize;
- data->state.headerbuff = newbuff;
- k->hbufp = data->state.headerbuff + hbufp_index;
- }
- memcpy(k->hbufp, k->str_start, length);
- k->hbufp += length;
- k->hbuflen += length;
- *k->hbufp = 0;
-
- return CURLE_OK;
-}
-
-
-/*
- * Read any HTTP header lines from the server and pass them to the client app.
- */
-static CURLcode readwrite_http_headers(struct SessionHandle *data,
- struct connectdata *conn,
- struct SingleRequest *k,
- ssize_t *nread,
- bool *stop_reading)
-{
- CURLcode result;
-
- /* header line within buffer loop */
- do {
- size_t rest_length;
- size_t full_length;
- int writetype;
-
- /* str_start is start of line within buf */
- k->str_start = k->str;
-
- /* data is in network encoding so use 0x0a instead of '\n' */
- k->end_ptr = memchr(k->str_start, 0x0a, *nread);
-
- if(!k->end_ptr) {
- /* Not a complete header line within buffer, append the data to
- the end of the headerbuff. */
- result = header_append(data, k, *nread);
- if(result)
- return result;
-
- if(!k->headerline && (k->hbuflen>5)) {
- /* make a first check that this looks like a HTTP header */
- if(!checkhttpprefix(data, data->state.headerbuff)) {
- /* this is not the beginning of a HTTP first header line */
- k->header = FALSE;
- k->badheader = HEADER_ALLBAD;
- break;
- }
- }
-
- break; /* read more and try again */
- }
-
- /* decrease the size of the remaining (supposed) header line */
- rest_length = (k->end_ptr - k->str)+1;
- *nread -= (ssize_t)rest_length;
-
- k->str = k->end_ptr + 1; /* move past new line */
-
- full_length = k->str - k->str_start;
-
- result = header_append(data, k, full_length);
- if(result)
- return result;
-
- k->end_ptr = k->hbufp;
- k->p = data->state.headerbuff;
-
- /****
- * We now have a FULL header line that p points to
- *****/
-
- if(!k->headerline) {
- /* the first read header */
- if((k->hbuflen>5) &&
- !checkhttpprefix(data, data->state.headerbuff)) {
- /* this is not the beginning of a HTTP first header line */
- k->header = FALSE;
- if(*nread)
- /* since there's more, this is a partial bad header */
- k->badheader = HEADER_PARTHEADER;
- else {
- /* this was all we read so it's all a bad header */
- k->badheader = HEADER_ALLBAD;
- *nread = (ssize_t)rest_length;
- }
- break;
- }
- }
-
- /* headers are in network encoding so
- use 0x0a and 0x0d instead of '\n' and '\r' */
- if((0x0a == *k->p) || (0x0d == *k->p)) {
- size_t headerlen;
- /* Zero-length header line means end of headers! */
-
-#ifdef CURL_DOES_CONVERSIONS
- if(0x0d == *k->p) {
- *k->p = '\r'; /* replace with CR in host encoding */
- k->p++; /* pass the CR byte */
- }
- if(0x0a == *k->p) {
- *k->p = '\n'; /* replace with LF in host encoding */
- k->p++; /* pass the LF byte */
- }
-#else
- if('\r' == *k->p)
- k->p++; /* pass the \r byte */
- if('\n' == *k->p)
- k->p++; /* pass the \n byte */
-#endif /* CURL_DOES_CONVERSIONS */
-
- if(100 <= k->httpcode && 199 >= k->httpcode) {
- /*
- * We have made a HTTP PUT or POST and this is 1.1-lingo
- * that tells us that the server is OK with this and ready
- * to receive the data.
- * However, we'll get more headers now so we must get
- * back into the header-parsing state!
- */
- k->header = TRUE;
- k->headerline = 0; /* restart the header line counter */
-
- /* if we did wait for this do enable write now! */
- if(k->exp100) {
- k->exp100 = EXP100_SEND_DATA;
- k->keepon |= KEEP_SEND;
- }
- }
- else {
- k->header = FALSE; /* no more header to parse! */
-
- if((k->size == -1) && !k->chunk && !conn->bits.close &&
- (conn->httpversion >= 11) ) {
- /* On HTTP 1.1, when connection is not to get closed, but no
- Content-Length nor Content-Encoding chunked have been
- received, according to RFC2616 section 4.4 point 5, we
- assume that the server will close the connection to
- signal the end of the document. */
- infof(data, "no chunk, no close, no size. Assume close to "
- "signal end\n");
- conn->bits.close = TRUE;
- }
- }
-
- if(417 == k->httpcode) {
- /*
- * we got: "417 Expectation Failed" this means:
- * we have made a HTTP call and our Expect Header
- * seems to cause a problem => abort the write operations
- * (or prevent them from starting).
- */
- k->exp100 = EXP100_FAILED;
- k->keepon &= ~KEEP_SEND;
- }
-
- /*
- * When all the headers have been parsed, see if we should give
- * up and return an error.
- */
- if(Curl_http_should_fail(conn)) {
- failf (data, "The requested URL returned error: %d",
- k->httpcode);
- return CURLE_HTTP_RETURNED_ERROR;
- }
-
- /* now, only output this if the header AND body are requested:
- */
- writetype = CLIENTWRITE_HEADER;
- if(data->set.include_header)
- writetype |= CLIENTWRITE_BODY;
-
- headerlen = k->p - data->state.headerbuff;
-
- result = Curl_client_write(conn, writetype,
- data->state.headerbuff,
- headerlen);
- if(result)
- return result;
-
- data->info.header_size += (long)headerlen;
- data->req.headerbytecount += (long)headerlen;
-
- data->req.deductheadercount =
- (100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
-
- if(data->state.resume_from &&
- (data->set.httpreq==HTTPREQ_GET) &&
- (k->httpcode == 416)) {
- /* "Requested Range Not Satisfiable" */
- *stop_reading = TRUE;
- }
-
- if(!*stop_reading) {
- /* Curl_http_auth_act() checks what authentication methods
- * that are available and decides which one (if any) to
- * use. It will set 'newurl' if an auth method was picked. */
- result = Curl_http_auth_act(conn);
-
- if(result)
- return result;
-
- if(conn->bits.rewindaftersend) {
- /* We rewind after a complete send, so thus we continue
- sending now */
- infof(data, "Keep sending data to get tossed away!\n");
- k->keepon |= KEEP_SEND;
- }
- }
-
- if(!k->header) {
- /*
- * really end-of-headers.
- *
- * If we requested a "no body", this is a good time to get
- * out and return home.
- */
- if(data->set.opt_no_body)
- *stop_reading = TRUE;
- else {
- /* If we know the expected size of this document, we set the
- maximum download size to the size of the expected
- document or else, we won't know when to stop reading!
-
- Note that we set the download maximum even if we read a
- "Connection: close" header, to make sure that
- "Content-Length: 0" still prevents us from attempting to
- read the (missing) response-body.
- */
- /* According to RFC2616 section 4.4, we MUST ignore
- Content-Length: headers if we are now receiving data
- using chunked Transfer-Encoding.
- */
- if(k->chunk)
- k->size=-1;
-
- }
- if(-1 != k->size) {
- /* We do this operation even if no_body is true, since this
- data might be retrieved later with curl_easy_getinfo()
- and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
-
- Curl_pgrsSetDownloadSize(data, k->size);
- k->maxdownload = k->size;
- }
- /* If max download size is *zero* (nothing) we already
- have nothing and can safely return ok now! */
- if(0 == k->maxdownload)
- *stop_reading = TRUE;
-
- if(*stop_reading) {
- /* we make sure that this socket isn't read more now */
- k->keepon &= ~KEEP_RECV;
- }
-
- if(data->set.verbose)
- Curl_debug(data, CURLINFO_HEADER_IN,
- k->str_start, headerlen, conn);
- break; /* exit header line loop */
- }
-
- /* We continue reading headers, so reset the line-based
- header parsing variables hbufp && hbuflen */
- k->hbufp = data->state.headerbuff;
- k->hbuflen = 0;
- continue;
- }
-
- /*
- * Checks for special headers coming up.
- */
-
- if(!k->headerline++) {
- /* This is the first header, it MUST be the error code line
- or else we consider this to be the body right away! */
- int httpversion_major;
- int nc;
-#ifdef CURL_DOES_CONVERSIONS
-#define HEADER1 scratch
-#define SCRATCHSIZE 21
- CURLcode res;
- char scratch[SCRATCHSIZE+1]; /* "HTTP/major.minor 123" */
- /* We can't really convert this yet because we
- don't know if it's the 1st header line or the body.
- So we do a partial conversion into a scratch area,
- leaving the data at k->p as-is.
- */
- strncpy(&scratch[0], k->p, SCRATCHSIZE);
- scratch[SCRATCHSIZE] = 0; /* null terminate */
- res = Curl_convert_from_network(data,
- &scratch[0],
- SCRATCHSIZE);
- if(CURLE_OK != res) {
- /* Curl_convert_from_network calls failf if unsuccessful */
- return res;
- }
-#else
-#define HEADER1 k->p /* no conversion needed, just use k->p */
-#endif /* CURL_DOES_CONVERSIONS */
-
- nc = sscanf(HEADER1,
- " HTTP/%d.%d %3d",
- &httpversion_major,
- &conn->httpversion,
- &k->httpcode);
- if(nc==3) {
- conn->httpversion += 10 * httpversion_major;
- }
- else {
- /* this is the real world, not a Nirvana
- NCSA 1.5.x returns this crap when asked for HTTP/1.1
- */
- nc=sscanf(HEADER1, " HTTP %3d", &k->httpcode);
- conn->httpversion = 10;
-
- /* If user has set option HTTP200ALIASES,
- compare header line against list of aliases
- */
- if(!nc) {
- if(checkhttpprefix(data, k->p)) {
- nc = 1;
- k->httpcode = 200;
- conn->httpversion = 10;
- }
- }
- }
-
- if(nc) {
- data->info.httpcode = k->httpcode;
- data->info.httpversion = conn->httpversion;
- if (!data->state.httpversion ||
- data->state.httpversion > conn->httpversion)
- /* store the lowest server version we encounter */
- data->state.httpversion = conn->httpversion;
-
- /*
- * This code executes as part of processing the header. As a
- * result, it's not totally clear how to interpret the
- * response code yet as that depends on what other headers may
- * be present. 401 and 407 may be errors, but may be OK
- * depending on how authentication is working. Other codes
- * are definitely errors, so give up here.
- */
- if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
- ((k->httpcode != 401) || !conn->bits.user_passwd) &&
- ((k->httpcode != 407) || !conn->bits.proxy_user_passwd) ) {
-
- if(data->state.resume_from &&
- (data->set.httpreq==HTTPREQ_GET) &&
- (k->httpcode == 416)) {
- /* "Requested Range Not Satisfiable", just proceed and
- pretend this is no error */
- }
- else {
- /* serious error, go home! */
- failf (data, "The requested URL returned error: %d",
- k->httpcode);
- return CURLE_HTTP_RETURNED_ERROR;
- }
- }
-
- if(conn->httpversion == 10) {
- /* Default action for HTTP/1.0 must be to close, unless
- we get one of those fancy headers that tell us the
- server keeps it open for us! */
- infof(data, "HTTP 1.0, assume close after body\n");
- conn->bits.close = TRUE;
- }
- else if(conn->httpversion >= 11 &&
- !conn->bits.close) {
- /* If HTTP version is >= 1.1 and connection is persistent
- server supports pipelining. */
- DEBUGF(infof(data,
- "HTTP 1.1 or later with persistent connection, "
- "pipelining supported\n"));
- conn->server_supports_pipelining = TRUE;
- }
-
- switch(k->httpcode) {
- case 204:
- /* (quote from RFC2616, section 10.2.5): The server has
- * fulfilled the request but does not need to return an
- * entity-body ... The 204 response MUST NOT include a
- * message-body, and thus is always terminated by the first
- * empty line after the header fields. */
- /* FALLTHROUGH */
- case 416: /* Requested Range Not Satisfiable, it has the
- Content-Length: set as the "real" document but no
- actual response is sent. */
- case 304:
- /* (quote from RFC2616, section 10.3.5): The 304 response
- * MUST NOT contain a message-body, and thus is always
- * terminated by the first empty line after the header
- * fields. */
- if(data->set.timecondition)
- data->info.timecond = TRUE;
- k->size=0;
- k->maxdownload=0;
- k->ignorecl = TRUE; /* ignore Content-Length headers */
- break;
- default:
- /* nothing */
- break;
- }
- }
- else {
- k->header = FALSE; /* this is not a header line */
- break;
- }
- }
-
-#ifdef CURL_DOES_CONVERSIONS
- /* convert from the network encoding */
- result = Curl_convert_from_network(data, k->p, strlen(k->p));
- if(CURLE_OK != result) {
- return(result);
- }
- /* Curl_convert_from_network calls failf if unsuccessful */
-#endif /* CURL_DOES_CONVERSIONS */
-
- /* Check for Content-Length: header lines to get size. Ignore
- the header completely if we get a 416 response as then we're
- resuming a document that we don't get, and this header contains
- info about the true size of the document we didn't get now. */
- if(!k->ignorecl && !data->set.ignorecl &&
- checkprefix("Content-Length:", k->p)) {
- curl_off_t contentlength = curlx_strtoofft(k->p+15, NULL, 10);
- if(data->set.max_filesize &&
- contentlength > data->set.max_filesize) {
- failf(data, "Maximum file size exceeded");
- return CURLE_FILESIZE_EXCEEDED;
- }
- if(contentlength >= 0) {
- k->size = contentlength;
- k->maxdownload = k->size;
- /* we set the progress download size already at this point
- just to make it easier for apps/callbacks to extract this
- info as soon as possible */
- Curl_pgrsSetDownloadSize(data, k->size);
- }
- else {
- /* Negative Content-Length is really odd, and we know it
- happens for example when older Apache servers send large
- files */
- conn->bits.close = TRUE;
- infof(data, "Negative content-length: %" FORMAT_OFF_T
- ", closing after transfer\n", contentlength);
- }
- }
- /* check for Content-Type: header lines to get the MIME-type */
- else if(checkprefix("Content-Type:", k->p)) {
- char *contenttype = Curl_copy_header_value(k->p);
- if (!contenttype)
- return CURLE_OUT_OF_MEMORY;
- if (!*contenttype)
- /* ignore empty data */
- free(contenttype);
- else {
- Curl_safefree(data->info.contenttype);
- data->info.contenttype = contenttype;
- }
- }
- else if((conn->httpversion == 10) &&
- conn->bits.httpproxy &&
- Curl_compareheader(k->p,
- "Proxy-Connection:", "keep-alive")) {
- /*
- * When a HTTP/1.0 reply comes when using a proxy, the
- * 'Proxy-Connection: keep-alive' line tells us the
- * connection will be kept alive for our pleasure.
- * Default action for 1.0 is to close.
- */
- conn->bits.close = FALSE; /* don't close when done */
- infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
- }
- else if((conn->httpversion == 11) &&
- conn->bits.httpproxy &&
- Curl_compareheader(k->p,
- "Proxy-Connection:", "close")) {
- /*
- * We get a HTTP/1.1 response from a proxy and it says it'll
- * close down after this transfer.
- */
- conn->bits.close = TRUE; /* close when done */
- infof(data, "HTTP/1.1 proxy connection set close!\n");
- }
- else if((conn->httpversion == 10) &&
- Curl_compareheader(k->p, "Connection:", "keep-alive")) {
- /*
- * A HTTP/1.0 reply with the 'Connection: keep-alive' line
- * tells us the connection will be kept alive for our
- * pleasure. Default action for 1.0 is to close.
- *
- * [RFC2068, section 19.7.1] */
- conn->bits.close = FALSE; /* don't close when done */
- infof(data, "HTTP/1.0 connection set to keep alive!\n");
- }
- else if(Curl_compareheader(k->p, "Connection:", "close")) {
- /*
- * [RFC 2616, section 8.1.2.1]
- * "Connection: close" is HTTP/1.1 language and means that
- * the connection will close when this request has been
- * served.
- */
- conn->bits.close = TRUE; /* close when done */
- }
- else if(Curl_compareheader(k->p,
- "Transfer-Encoding:", "chunked")) {
- /*
- * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
- * means that the server will send a series of "chunks". Each
- * chunk starts with line with info (including size of the
- * coming block) (terminated with CRLF), then a block of data
- * with the previously mentioned size. There can be any amount
- * of chunks, and a chunk-data set to zero signals the
- * end-of-chunks. */
- k->chunk = TRUE; /* chunks coming our way */
-
- /* init our chunky engine */
- Curl_httpchunk_init(conn);
- }
-
- else if(checkprefix("Trailer:", k->p) ||
- checkprefix("Trailers:", k->p)) {
- /*
- * This test helps Curl_httpchunk_read() to determine to look
- * for well formed trailers after the zero chunksize record. In
- * this case a CRLF is required after the zero chunksize record
- * when no trailers are sent, or after the last trailer record.
- *
- * It seems both Trailer: and Trailers: occur in the wild.
- */
- k->trailerhdrpresent = TRUE;
- }
-
- else if(checkprefix("Content-Encoding:", k->p) &&
- data->set.str[STRING_ENCODING]) {
- /*
- * Process Content-Encoding. Look for the values: identity,
- * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
- * x-compress are the same as gzip and compress. (Sec 3.5 RFC
- * 2616). zlib cannot handle compress. However, errors are
- * handled further down when the response body is processed
- */
- char *start;
-
- /* Find the first non-space letter */
- start = k->p + 17;
- while(*start && ISSPACE(*start))
- start++;
-
- /* Record the content-encoding for later use */
- if(checkprefix("identity", start))
- k->content_encoding = IDENTITY;
- else if(checkprefix("deflate", start))
- k->content_encoding = DEFLATE;
- else if(checkprefix("gzip", start)
- || checkprefix("x-gzip", start))
- k->content_encoding = GZIP;
- else if(checkprefix("compress", start)
- || checkprefix("x-compress", start))
- k->content_encoding = COMPRESS;
- }
- else if(checkprefix("Content-Range:", k->p)) {
- /* Content-Range: bytes [num]-
- Content-Range: bytes: [num]-
- Content-Range: [num]-
-
- The second format was added since Sun's webserver
- JavaWebServer/1.1.1 obviously sends the header this way!
- The third added since some servers use that!
- */
-
- char *ptr = k->p + 14;
-
- /* Move forward until first digit */
- while(*ptr && !ISDIGIT(*ptr))
- ptr++;
-
- k->offset = curlx_strtoofft(ptr, NULL, 10);
-
- if(data->state.resume_from == k->offset)
- /* we asked for a resume and we got it */
- k->content_range = TRUE;
- }
-#if !defined(CURL_DISABLE_COOKIES)
- else if(data->cookies &&
- checkprefix("Set-Cookie:", k->p)) {
- Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
- CURL_LOCK_ACCESS_SINGLE);
- Curl_cookie_add(data,
- data->cookies, TRUE, k->p+11,
- /* If there is a custom-set Host: name, use it
- here, or else use real peer host name. */
- conn->allocptr.cookiehost?
- conn->allocptr.cookiehost:conn->host.name,
- data->state.path);
- Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
- }
-#endif
- else if(checkprefix("Last-Modified:", k->p) &&
- (data->set.timecondition || data->set.get_filetime) ) {
- time_t secs=time(NULL);
- k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
- &secs);
- if(data->set.get_filetime)
- data->info.filetime = (long)k->timeofdoc;
- }
- else if((checkprefix("WWW-Authenticate:", k->p) &&
- (401 == k->httpcode)) ||
- (checkprefix("Proxy-authenticate:", k->p) &&
- (407 == k->httpcode))) {
- result = Curl_http_input_auth(conn, k->httpcode, k->p);
- if(result)
- return result;
- }
- else if((k->httpcode >= 300 && k->httpcode < 400) &&
- checkprefix("Location:", k->p)) {
- /* this is the URL that the server advises us to use instead */
- char *location = Curl_copy_header_value(k->p);
- if (!location)
- return CURLE_OUT_OF_MEMORY;
- if (!*location)
- /* ignore empty data */
- free(location);
- else {
- DEBUGASSERT(!data->req.location);
- data->req.location = location;
-
- if(data->set.http_follow_location) {
- DEBUGASSERT(!data->req.newurl);
- data->req.newurl = strdup(data->req.location); /* clone */
- if(!data->req.newurl)
- return CURLE_OUT_OF_MEMORY;
-
- /* some cases of POST and PUT etc needs to rewind the data
- stream at this point */
- result = Curl_http_perhapsrewind(conn);
- if(result)
- return result;
- }
- }
- }
-
- /*
- * End of header-checks. Write them to the client.
- */
-
- writetype = CLIENTWRITE_HEADER;
- if(data->set.include_header)
- writetype |= CLIENTWRITE_BODY;
-
- if(data->set.verbose)
- Curl_debug(data, CURLINFO_HEADER_IN,
- k->p, (size_t)k->hbuflen, conn);
-
- result = Curl_client_write(conn, writetype, k->p, k->hbuflen);
- if(result)
- return result;
-
- data->info.header_size += (long)k->hbuflen;
- data->req.headerbytecount += (long)k->hbuflen;
-
- /* reset hbufp pointer && hbuflen */
- k->hbufp = data->state.headerbuff;
- k->hbuflen = 0;
- }
- while(!*stop_reading && *k->str); /* header line within buffer */
-
- /* We might have reached the end of the header part here, but
- there might be a non-header part left in the end of the read
- buffer. */
-
- return CURLE_OK;
-}
-#endif /* CURL_DISABLE_HTTP */
-
/*
* Send data to upload to the server, when the socket is writable.
*/
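
A closing note on the Content-Range handling seen in this patch: it
deliberately skips ahead to the first digit so that all three observed
header formats parse identically. A sketch, with long standing in for
curl_off_t; range_start is an illustrative name.

#include <ctype.h>
#include <stdlib.h>

/* accepts "bytes [num]-", "bytes: [num]-" and plain "[num]-" alike */
static long range_start(const char *value)
{
  const char *p = value;
  while(*p && !isdigit((unsigned char)*p))
    p++;
  return strtol(p, NULL, 10);
}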