From b903186fa0189ff241d756d25d07fdfe9885ae49 Mon Sep 17 00:00:00 2001
From: Daniel Stenberg
Date: Wed, 20 Apr 2011 15:17:42 +0200
Subject: source cleanup: unify look, style and indent levels

By the use of the new lib/checksrc.pl script that checks that our
basic source style rules are followed.
---
 lib/transfer.c | 49 +++++++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 24 deletions(-)

(limited to 'lib/transfer.c')

diff --git a/lib/transfer.c b/lib/transfer.c
index ee817568c..41f0de4ed 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -509,18 +509,18 @@ static CURLcode readwrite_data(struct SessionHandle *data,
             infof(data,
                   "Rewinding stream by : %zd"
                   " bytes on url %s (zero-length body)\n",
                   nread, data->state.path);
-          read_rewind(conn, (size_t)nread);
+            read_rewind(conn, (size_t)nread);
           }
-        else {
+          else {
             infof(data, "Excess found in a non pipelined read:"
                   " excess = %zd"
-               " url = %s (zero-length body)\n",
+                  " url = %s (zero-length body)\n",
                   nread, data->state.path);
-        }
-      }
+          }
+        }
 
-      break;
+        break;
       }
     }
 #endif /* CURL_DISABLE_HTTP */
@@ -629,7 +629,8 @@ static CURLcode readwrite_data(struct SessionHandle *data,
     dataleft = conn->chunk.dataleft;
     if(dataleft != 0) {
       infof(conn->data, "Leftovers after chunking: %zu bytes", dataleft);
-      if(conn->data->multi && Curl_multi_canPipeline(conn->data->multi)) {
+      if(conn->data->multi &&
+         Curl_multi_canPipeline(conn->data->multi)) {
         /* only attempt the rewind if we truly are pipelining */
         infof(conn->data, "Rewinding %zu bytes\n",dataleft);
         read_rewind(conn, dataleft);
@@ -1235,19 +1236,19 @@ long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps,
   curl_off_t min_sleep = 0;
   curl_off_t rv = 0;
 
-  if (rate_bps == 0)
+  if(rate_bps == 0)
     return 0;
 
   /* If running faster than about .1% of the desired speed, slow
    * us down a bit. Use shift instead of division as the 0.1%
    * cutoff is arbitrary anyway.
    */
-  if (cur_rate_bps > (rate_bps + (rate_bps >> 10))) {
+  if(cur_rate_bps > (rate_bps + (rate_bps >> 10))) {
     /* running too fast, decrease target rate by 1/64th of rate */
     rate_bps -= rate_bps >> 6;
     min_sleep = 1;
   }
-  else if (cur_rate_bps < (rate_bps - (rate_bps >> 10))) {
+  else if(cur_rate_bps < (rate_bps - (rate_bps >> 10))) {
     /* running too slow, increase target rate by 1/64th of rate */
     rate_bps += rate_bps >> 6;
   }
@@ -1261,7 +1262,7 @@
   /* Catch rounding errors and always slow down at least 1ms if
    * we are running too fast.
    */
-  if (rv < min_sleep)
+  if(rv < min_sleep)
     rv = min_sleep;
 
   /* Bound value to fit in 'long' on 32-bit platform. That's
@@ -1329,14 +1330,14 @@ Transfer(struct connectdata *conn)
       k->keepon &= ~KEEP_SEND_HOLD;
     }
     else {
-      if (data->set.upload && data->set.max_send_speed &&
-          (data->progress.ulspeed > data->set.max_send_speed) ) {
+      if(data->set.upload && data->set.max_send_speed &&
+         (data->progress.ulspeed > data->set.max_send_speed) ) {
         /* calculate upload rate-limitation timeout. */
         buffersize = (int)(data->set.buffer_size ?
                            data->set.buffer_size : BUFSIZE);
         totmp = (int)Curl_sleep_time(data->set.max_send_speed,
                                      data->progress.ulspeed, buffersize);
-        if (totmp < timeout_ms)
+        if(totmp < timeout_ms)
           timeout_ms = totmp;
       }
       fd_write = CURL_SOCKET_BAD;
@@ -1350,14 +1351,14 @@ Transfer(struct connectdata *conn)
       k->keepon &= ~KEEP_RECV_HOLD;
     }
     else {
-      if ((!data->set.upload) && data->set.max_recv_speed &&
-          (data->progress.dlspeed > data->set.max_recv_speed)) {
+      if((!data->set.upload) && data->set.max_recv_speed &&
+         (data->progress.dlspeed > data->set.max_recv_speed)) {
         /* Calculate download rate-limitation timeout. */
         buffersize = (int)(data->set.buffer_size ?
                            data->set.buffer_size : BUFSIZE);
         totmp = (int)Curl_sleep_time(data->set.max_recv_speed,
                                      data->progress.dlspeed, buffersize);
-        if (totmp < timeout_ms)
+        if(totmp < timeout_ms)
           timeout_ms = totmp;
       }
       fd_read = CURL_SOCKET_BAD;
@@ -1393,7 +1394,7 @@ Transfer(struct connectdata *conn)
       else if(!totmp)
         totmp = 1000;
 
-      if (totmp < timeout_ms)
+      if(totmp < timeout_ms)
         timeout_ms = totmp;
     }
 
@@ -1815,7 +1816,7 @@ CURLcode Curl_follow(struct SessionHandle *data,
 
     free(data->change.referer);
     data->change.referer = strdup(data->change.url);
-    if (!data->change.referer) {
+    if(!data->change.referer) {
       data->change.referer_alloc = FALSE;
       return CURLE_OUT_OF_MEMORY;
     }
@@ -1829,7 +1830,7 @@ CURLcode Curl_follow(struct SessionHandle *data,
        to be absolute and this doesn't seem to be that!
     */
     char *absolute = concat_url(data->change.url, newurl);
-    if (!absolute)
+    if(!absolute)
       return CURLE_OUT_OF_MEMORY;
     free(newurl);
     newurl = absolute;
@@ -1845,7 +1846,7 @@ CURLcode Curl_follow(struct SessionHandle *data,
       size_t newlen = strlen_url(newurl);
 
       newest = malloc(newlen+1); /* get memory for this */
-      if (!newest)
+      if(!newest)
         return CURLE_OUT_OF_MEMORY;
       strcpy_url(newest, newurl); /* create a space-free URL */
 
@@ -2164,7 +2165,7 @@ static CURLcode Curl_do_perform(struct SessionHandle *data)
         res = CURLE_OK;
         follow = FOLLOW_RETRY;
       }
-      else if (res == CURLE_OK) {
+      else if(res == CURLE_OK) {
         /*
          * We must duplicate the new URL here as the connection data may
          * be free()ed in the Curl_done() function. We prefer the newurl
@@ -2174,13 +2175,13 @@ static CURLcode Curl_do_perform(struct SessionHandle *data)
         if(data->req.newurl) {
           follow = FOLLOW_REDIR;
           newurl = strdup(data->req.newurl);
-          if (!newurl)
+          if(!newurl)
             res = CURLE_OUT_OF_MEMORY;
         }
         else if(data->req.location) {
           follow = FOLLOW_FAKE;
           newurl = strdup(data->req.location);
-          if (!newurl)
+          if(!newurl)
             res = CURLE_OUT_OF_MEMORY;
         }
       }
--
cgit v1.2.3
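
Note: the style rule applied throughout this patch (and, per the commit message, checked by the new lib/checksrc.pl script) is visible directly in the hunks: no space between a conditional keyword and its opening parenthesis, plus consistent two-space indent steps. A minimal before/after sketch in C, illustrative only and not taken from checksrc.pl itself:

    /* old style, as removed by this patch */
    if (totmp < timeout_ms)
      timeout_ms = totmp;

    /* new style, as enforced by this patch */
    if(totmp < timeout_ms)
      timeout_ms = totmp;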
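Note: the Curl_sleep_time() hunks only touch formatting, but their comments describe the rate-pacing idea behind the max_send_speed/max_recv_speed handling in Transfer(): if the measured rate drifts more than roughly 0.1% (rate_bps >> 10) above the target, the target is nudged down by 1/64th and at least a 1 ms sleep is enforced; if it drifts below, the target is nudged up. The sketch below is an assumption-laden illustration of that idea, not curl's implementation; the function name and the final sleep formula are invented for the example:

    #include <stdint.h>

    /* Illustrative pacing helper: how long to sleep, in milliseconds, after
     * sending one buffer so the average rate approaches rate_bps. The 1/64th
     * nudging and the ~0.1% dead band mirror the comments in the
     * Curl_sleep_time() hunks; everything else is a simplified assumption. */
    static long pacing_sleep_ms(int64_t rate_bps,     /* target, bytes/second */
                                int64_t cur_rate_bps, /* measured, bytes/second */
                                int packet_size)      /* bytes per send/recv */
    {
      int64_t min_sleep = 0;
      int64_t rv;

      if(rate_bps == 0)
        return 0;                   /* no rate limit configured */

      /* rate_bps >> 10 is about 0.1% of the rate; a cheap dead band */
      if(cur_rate_bps > (rate_bps + (rate_bps >> 10))) {
        rate_bps -= rate_bps >> 6;  /* too fast: aim 1/64th lower */
        min_sleep = 1;              /* and always sleep at least 1 ms */
      }
      else if(cur_rate_bps < (rate_bps - (rate_bps >> 10)))
        rate_bps += rate_bps >> 6;  /* too slow: aim 1/64th higher */

      /* time one packet "should" take at the adjusted target rate */
      rv = ((int64_t)packet_size * 1000) / rate_bps;

      if(rv < min_sleep)            /* catch rounding down to zero */
        rv = min_sleep;

      return (long)rv;
    }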