author     Daniel Stenberg <daniel@haxx.se>    2010-11-04 11:42:27 +0100
committer  Daniel Stenberg <daniel@haxx.se>    2010-11-04 11:43:50 +0100
commit     809a748124cabb781b654f40e30fa51ae565f7c8
tree       c1677a19ddac143bc21b067aca08734ae994a959 /docs/examples/fopen.c
parent     542318b11361210e84d25e653bee3442fccf6766
fopen.c: re-indented, fixed previous mistake
I've re-indented the code using curl style so it looks more like the other examples. My previous "fix" was a bit too invasive, but that is now corrected.
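For readers unfamiliar with what "curl style" means in practice, the added (+) lines of the diff below show it: two-space indentation, no space between a keyword and its opening parenthesis, and the opening brace kept on the same line as the condition. The following before/after sketch uses an invented helper purely to illustrate the layout; it is not part of the example code.

/* layout used before this commit: four-space indent, braces on their own lines */
static int demo_old(int x)
{
    if(x > 0)
    {
        return 1;
    }
    return 0;
}

/* curl-style layout introduced by this commit: two-space indent,
   opening brace on the same line as the condition */
static int demo_new(int x)
{
  if(x > 0) {
    return 1;
  }
  return 0;
}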
Diffstat (limited to 'docs/examples/fopen.c')
-rw-r--r--  docs/examples/fopen.c | 743
1 file changed, 350 insertions, 393 deletions
diff --git a/docs/examples/fopen.c b/docs/examples/fopen.c
index 874d380d7..e4505bf58 100644
--- a/docs/examples/fopen.c
+++ b/docs/examples/fopen.c
@@ -53,20 +53,24 @@
#include <curl/curl.h>
-enum fcurl_type_e { CFTYPE_NONE=0, CFTYPE_FILE=1, CFTYPE_CURL=2 };
+enum fcurl_type_e {
+ CFTYPE_NONE=0,
+ CFTYPE_FILE=1,
+ CFTYPE_CURL=2
+};
struct fcurl_data
{
- enum fcurl_type_e type; /* type of handle */
- union {
- CURL *curl;
- FILE *file;
- } handle; /* handle */
-
- char *buffer; /* buffer to store cached data*/
- int buffer_len; /* currently allocated buffers length */
- int buffer_pos; /* end of data in buffer*/
- int still_running; /* Is background url fetch still in progress */
+ enum fcurl_type_e type; /* type of handle */
+ union {
+ CURL *curl;
+ FILE *file;
+ } handle; /* handle */
+
+ char *buffer; /* buffer to store cached data*/
+ int buffer_len; /* currently allocated buffers length */
+ int buffer_pos; /* end of data in buffer*/
+ int still_running; /* Is background url fetch still in progress */
};
typedef struct fcurl_data URL_FILE;
@@ -83,488 +87,441 @@ void url_rewind(URL_FILE *file);
CURLM *multi_handle;
/* curl calls this routine to get more data */
-static size_t
-write_callback(char *buffer,
- size_t size,
- size_t nitems,
- void *userp)
+static size_t write_callback(char *buffer,
+ size_t size,
+ size_t nitems,
+ void *userp)
{
- char *newbuff;
- int rembuff;
-
- URL_FILE *url = (URL_FILE *)userp;
- size *= nitems;
-
- rembuff=url->buffer_len - url->buffer_pos; /* remaining space in buffer */
-
- if(size > rembuff)
- {
- /* not enough space in buffer */
- newbuff=realloc(url->buffer,url->buffer_len + (size - rembuff));
- if(newbuff==NULL)
- {
- fprintf(stderr,"callback buffer grow failed\n");
- size=rembuff;
- }
- else
- {
- /* realloc suceeded increase buffer size*/
- url->buffer_len+=size - rembuff;
- url->buffer=newbuff;
-
- /*printf("Callback buffer grown to %d bytes\n",url->buffer_len);*/
- }
- }
+ char *newbuff;
+ int rembuff;
+
+ URL_FILE *url = (URL_FILE *)userp;
+ size *= nitems;
- memcpy(&url->buffer[url->buffer_pos], buffer, size);
- url->buffer_pos += size;
+ rembuff=url->buffer_len - url->buffer_pos; /* remaining space in buffer */
+
+ if(size > rembuff) {
+ /* not enough space in buffer */
+ newbuff=realloc(url->buffer,url->buffer_len + (size - rembuff));
+ if(newbuff==NULL) {
+ fprintf(stderr,"callback buffer grow failed\n");
+ size=rembuff;
+ }
+ else {
+ /* realloc suceeded increase buffer size*/
+ url->buffer_len+=size - rembuff;
+ url->buffer=newbuff;
+ }
+ }
- /*fprintf(stderr, "callback %d size bytes\n", size);*/
+ memcpy(&url->buffer[url->buffer_pos], buffer, size);
+ url->buffer_pos += size;
- return size;
+ return size;
}
/* use to attempt to fill the read buffer up to requested number of bytes */
-static int
-fill_buffer(URL_FILE *file,int want,int waittime)
+static int fill_buffer(URL_FILE *file,int want,int waittime)
{
- fd_set fdread;
- fd_set fdwrite;
- fd_set fdexcep;
- struct timeval timeout;
- int rc;
-
- /* only attempt to fill buffer if transactions still running and buffer
- * doesnt exceed required size already
- */
- if((!file->still_running) || (file->buffer_pos > want))
- return 0;
-
- /* attempt to fill buffer */
- do
- {
- int maxfd = -1;
- long curl_timeo = -1;
-
- FD_ZERO(&fdread);
- FD_ZERO(&fdwrite);
- FD_ZERO(&fdexcep);
-
- /* set a suitable timeout to fail on */
- timeout.tv_sec = 60; /* 1 minute */
- timeout.tv_usec = 0;
-
- curl_multi_timeout(multi_handle, &curl_timeo);
- if(curl_timeo >= 0) {
- timeout.tv_sec = curl_timeo / 1000;
- if(timeout.tv_sec > 1)
- timeout.tv_sec = 1;
- else
- timeout.tv_usec = (curl_timeo % 1000) * 1000;
- }
-
- /* get file descriptors from the transfers */
- curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
-
- /* In a real-world program you OF COURSE check the return code of the
- function calls. On success, the value of maxfd is guaranteed to be
- greater or equal than -1. We call select(maxfd + 1, ...), specially
- in case of (maxfd == -1), we call select(0, ...), which is basically
- equal to sleep. */
-
- rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
-
- switch(rc) {
- case -1:
- /* select error */
- break;
-
- case 0:
- /* timeout or readable/writable sockets */
- curl_multi_perform(multi_handle, &file->still_running);
- break;
- }
- } while(file->still_running && (file->buffer_pos < want));
- return 1;
-}
+ fd_set fdread;
+ fd_set fdwrite;
+ fd_set fdexcep;
+ struct timeval timeout;
+ int rc;
+
+ /* only attempt to fill buffer if transactions still running and buffer
+ * doesnt exceed required size already
+ */
+ if((!file->still_running) || (file->buffer_pos > want))
+ return 0;
-/* use to remove want bytes from the front of a files buffer */
-static int
-use_buffer(URL_FILE *file,int want)
-{
- /* sort out buffer */
- if((file->buffer_pos - want) <=0)
- {
- /* ditch buffer - write will recreate */
- if(file->buffer)
- free(file->buffer);
-
- file->buffer=NULL;
- file->buffer_pos=0;
- file->buffer_len=0;
+ /* attempt to fill buffer */
+ do {
+ int maxfd = -1;
+ long curl_timeo = -1;
+
+ FD_ZERO(&fdread);
+ FD_ZERO(&fdwrite);
+ FD_ZERO(&fdexcep);
+
+ /* set a suitable timeout to fail on */
+ timeout.tv_sec = 60; /* 1 minute */
+ timeout.tv_usec = 0;
+
+ curl_multi_timeout(multi_handle, &curl_timeo);
+ if(curl_timeo >= 0) {
+ timeout.tv_sec = curl_timeo / 1000;
+ if(timeout.tv_sec > 1)
+ timeout.tv_sec = 1;
+ else
+ timeout.tv_usec = (curl_timeo % 1000) * 1000;
}
- else
- {
- /* move rest down make it available for later */
- memmove(file->buffer,
- &file->buffer[want],
- (file->buffer_pos - want));
-
- file->buffer_pos -= want;
+
+ /* get file descriptors from the transfers */
+ curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
+
+ /* In a real-world program you OF COURSE check the return code of the
+ function calls. On success, the value of maxfd is guaranteed to be
+ greater or equal than -1. We call select(maxfd + 1, ...), specially
+ in case of (maxfd == -1), we call select(0, ...), which is basically
+ equal to sleep. */
+
+ rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
+
+ switch(rc) {
+ case -1:
+ /* select error */
+ break;
+
+ case 0:
+ default:
+ /* timeout or readable/writable sockets */
+ curl_multi_perform(multi_handle, &file->still_running);
+ break;
}
- return 0;
+ } while(file->still_running && (file->buffer_pos < want));
+ return 1;
}
+/* use to remove want bytes from the front of a files buffer */
+static int use_buffer(URL_FILE *file,int want)
+{
+ /* sort out buffer */
+ if((file->buffer_pos - want) <=0) {
+ /* ditch buffer - write will recreate */
+ if(file->buffer)
+ free(file->buffer);
+
+ file->buffer=NULL;
+ file->buffer_pos=0;
+ file->buffer_len=0;
+ }
+ else {
+ /* move rest down make it available for later */
+ memmove(file->buffer,
+ &file->buffer[want],
+ (file->buffer_pos - want));
+
+ file->buffer_pos -= want;
+ }
+ return 0;
+}
-
-URL_FILE *
-url_fopen(const char *url,const char *operation)
+URL_FILE *url_fopen(const char *url,const char *operation)
{
- /* this code could check for URLs or types in the 'url' and
- basicly use the real fopen() for standard files */
+ /* this code could check for URLs or types in the 'url' and
+ basicly use the real fopen() for standard files */
- URL_FILE *file;
- (void)operation;
+ URL_FILE *file;
+ (void)operation;
- file = malloc(sizeof(URL_FILE));
- if(!file)
- return NULL;
+ file = malloc(sizeof(URL_FILE));
+ if(!file)
+ return NULL;
- memset(file, 0, sizeof(URL_FILE));
+ memset(file, 0, sizeof(URL_FILE));
- if((file->handle.file=fopen(url,operation)))
- {
- file->type = CFTYPE_FILE; /* marked as URL */
- }
- else
- {
- file->type = CFTYPE_CURL; /* marked as URL */
- file->handle.curl = curl_easy_init();
+ if((file->handle.file=fopen(url,operation)))
+ file->type = CFTYPE_FILE; /* marked as URL */
+
+ else {
+ file->type = CFTYPE_CURL; /* marked as URL */
+ file->handle.curl = curl_easy_init();
- curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
- curl_easy_setopt(file->handle.curl, CURLOPT_WRITEDATA, file);
- curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, 0L);
- curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
+ curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
+ curl_easy_setopt(file->handle.curl, CURLOPT_WRITEDATA, file);
+ curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, 0L);
+ curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
- if(!multi_handle)
- multi_handle = curl_multi_init();
+ if(!multi_handle)
+ multi_handle = curl_multi_init();
- curl_multi_add_handle(multi_handle, file->handle.curl);
+ curl_multi_add_handle(multi_handle, file->handle.curl);
- /* lets start the fetch */
- curl_multi_perform(multi_handle, &file->still_running);
+ /* lets start the fetch */
+ curl_multi_perform(multi_handle, &file->still_running);
- if((file->buffer_pos == 0) && (!file->still_running))
- {
- /* if still_running is 0 now, we should return NULL */
+ if((file->buffer_pos == 0) && (!file->still_running)) {
+ /* if still_running is 0 now, we should return NULL */
- /* make sure the easy handle is not in the multi handle anymore */
- curl_multi_remove_handle(multi_handle, file->handle.curl);
+ /* make sure the easy handle is not in the multi handle anymore */
+ curl_multi_remove_handle(multi_handle, file->handle.curl);
- /* cleanup */
- curl_easy_cleanup(file->handle.curl);
+ /* cleanup */
+ curl_easy_cleanup(file->handle.curl);
- free(file);
+ free(file);
- file = NULL;
- }
+ file = NULL;
}
- return file;
+ }
+ return file;
}
-int
-url_fclose(URL_FILE *file)
+int url_fclose(URL_FILE *file)
{
- int ret=0;/* default is good return */
+ int ret=0;/* default is good return */
- switch(file->type)
- {
- case CFTYPE_FILE:
- ret=fclose(file->handle.file); /* passthrough */
- break;
+ switch(file->type) {
+ case CFTYPE_FILE:
+ ret=fclose(file->handle.file); /* passthrough */
+ break;
- case CFTYPE_CURL:
- /* make sure the easy handle is not in the multi handle anymore */
- curl_multi_remove_handle(multi_handle, file->handle.curl);
+ case CFTYPE_CURL:
+ /* make sure the easy handle is not in the multi handle anymore */
+ curl_multi_remove_handle(multi_handle, file->handle.curl);
- /* cleanup */
- curl_easy_cleanup(file->handle.curl);
- break;
+ /* cleanup */
+ curl_easy_cleanup(file->handle.curl);
+ break;
- default: /* unknown or supported type - oh dear */
- ret=EOF;
- errno=EBADF;
- break;
+ default: /* unknown or supported type - oh dear */
+ ret=EOF;
+ errno=EBADF;
+ break;
+ }
- }
+ if(file->buffer)
+ free(file->buffer);/* free any allocated buffer space */
- if(file->buffer)
- free(file->buffer);/* free any allocated buffer space */
+ free(file);
- free(file);
-
- return ret;
+ return ret;
}
-int
-url_feof(URL_FILE *file)
+int url_feof(URL_FILE *file)
{
- int ret=0;
-
- switch(file->type)
- {
- case CFTYPE_FILE:
- ret=feof(file->handle.file);
- break;
-
- case CFTYPE_CURL:
- if((file->buffer_pos == 0) && (!file->still_running))
- ret = 1;
- break;
- default: /* unknown or supported type - oh dear */
- ret=-1;
- errno=EBADF;
- break;
- }
- return ret;
+ int ret=0;
+
+ switch(file->type) {
+ case CFTYPE_FILE:
+ ret=feof(file->handle.file);
+ break;
+
+ case CFTYPE_CURL:
+ if((file->buffer_pos == 0) && (!file->still_running))
+ ret = 1;
+ break;
+
+ default: /* unknown or supported type - oh dear */
+ ret=-1;
+ errno=EBADF;
+ break;
+ }
+ return ret;
}
-size_t
-url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
+size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
{
- size_t want;
+ size_t want;
- switch(file->type)
- {
- case CFTYPE_FILE:
- want=fread(ptr,size,nmemb,file->handle.file);
- break;
+ switch(file->type) {
+ case CFTYPE_FILE:
+ want=fread(ptr,size,nmemb,file->handle.file);
+ break;
- case CFTYPE_CURL:
- want = nmemb * size;
+ case CFTYPE_CURL:
+ want = nmemb * size;
- fill_buffer(file,want,1);
+ fill_buffer(file,want,1);
- /* check if theres data in the buffer - if not fill_buffer()
- * either errored or EOF */
- if(!file->buffer_pos)
- return 0;
+ /* check if theres data in the buffer - if not fill_buffer()
+ * either errored or EOF */
+ if(!file->buffer_pos)
+ return 0;
- /* ensure only available data is considered */
- if(file->buffer_pos < want)
- want = file->buffer_pos;
+ /* ensure only available data is considered */
+ if(file->buffer_pos < want)
+ want = file->buffer_pos;
- /* xfer data to caller */
- memcpy(ptr, file->buffer, want);
+ /* xfer data to caller */
+ memcpy(ptr, file->buffer, want);
- use_buffer(file,want);
+ use_buffer(file,want);
- want = want / size; /* number of items - nb correct op - checked
- * with glibc code*/
+ want = want / size; /* number of items */
+ break;
- /*printf("(fread) return %d bytes %d left\n", want,file->buffer_pos);*/
- break;
+ default: /* unknown or supported type - oh dear */
+ want=0;
+ errno=EBADF;
+ break;
- default: /* unknown or supported type - oh dear */
- want=0;
- errno=EBADF;
- break;
-
- }
- return want;
+ }
+ return want;
}
-char *
-url_fgets(char *ptr, int size, URL_FILE *file)
+char *url_fgets(char *ptr, int size, URL_FILE *file)
{
- int want = size - 1;/* always need to leave room for zero termination */
- int loop;
-
- switch(file->type)
- {
- case CFTYPE_FILE:
- ptr = fgets(ptr,size,file->handle.file);
+ int want = size - 1;/* always need to leave room for zero termination */
+ int loop;
+
+ switch(file->type) {
+ case CFTYPE_FILE:
+ ptr = fgets(ptr,size,file->handle.file);
+ break;
+
+ case CFTYPE_CURL:
+ fill_buffer(file,want,1);
+
+ /* check if theres data in the buffer - if not fill either errored or
+ * EOF */
+ if(!file->buffer_pos)
+ return NULL;
+
+ /* ensure only available data is considered */
+ if(file->buffer_pos < want)
+ want = file->buffer_pos;
+
+ /*buffer contains data */
+ /* look for newline or eof */
+ for(loop=0;loop < want;loop++) {
+ if(file->buffer[loop] == '\n') {
+ want=loop+1;/* include newline */
break;
+ }
+ }
- case CFTYPE_CURL:
- fill_buffer(file,want,1);
-
- /* check if theres data in the buffer - if not fill either errored or
- * EOF */
- if(!file->buffer_pos)
- return NULL;
-
- /* ensure only available data is considered */
- if(file->buffer_pos < want)
- want = file->buffer_pos;
+ /* xfer data to caller */
+ memcpy(ptr, file->buffer, want);
+ ptr[want]=0;/* allways null terminate */
- /*buffer contains data */
- /* look for newline or eof */
- for(loop=0;loop < want;loop++)
- {
- if(file->buffer[loop] == '\n')
- {
- want=loop+1;/* include newline */
- break;
- }
- }
+ use_buffer(file,want);
- /* xfer data to caller */
- memcpy(ptr, file->buffer, want);
- ptr[want]=0;/* allways null terminate */
+ break;
- use_buffer(file,want);
+ default: /* unknown or supported type - oh dear */
+ ptr=NULL;
+ errno=EBADF;
+ break;
+ }
- /*printf("(fgets) return %d bytes %d left\n", want,file->buffer_pos);*/
- break;
-
- default: /* unknown or supported type - oh dear */
- ptr=NULL;
- errno=EBADF;
- break;
- }
-
- return ptr;/*success */
+ return ptr;/*success */
}
-void
-url_rewind(URL_FILE *file)
+void url_rewind(URL_FILE *file)
{
- switch(file->type)
- {
- case CFTYPE_FILE:
- rewind(file->handle.file); /* passthrough */
- break;
-
- case CFTYPE_CURL:
- /* halt transaction */
- curl_multi_remove_handle(multi_handle, file->handle.curl);
-
- /* restart */
- curl_multi_add_handle(multi_handle, file->handle.curl);
+ switch(file->type) {
+ case CFTYPE_FILE:
+ rewind(file->handle.file); /* passthrough */
+ break;
- /* ditch buffer - write will recreate - resets stream pos*/
- if(file->buffer)
- free(file->buffer);
+ case CFTYPE_CURL:
+ /* halt transaction */
+ curl_multi_remove_handle(multi_handle, file->handle.curl);
- file->buffer=NULL;
- file->buffer_pos=0;
- file->buffer_len=0;
+ /* restart */
+ curl_multi_add_handle(multi_handle, file->handle.curl);
- break;
+ /* ditch buffer - write will recreate - resets stream pos*/
+ if(file->buffer)
+ free(file->buffer);
- default: /* unknown or supported type - oh dear */
- break;
+ file->buffer=NULL;
+ file->buffer_pos=0;
+ file->buffer_len=0;
- }
+ break;
+ default: /* unknown or supported type - oh dear */
+ break;
+ }
}
-
/* Small main program to retrive from a url using fgets and fread saving the
* output to two test files (note the fgets method will corrupt binary files if
* they contain 0 chars */
-int
-main(int argc, char *argv[])
+int main(int argc, char *argv[])
{
- URL_FILE *handle;
- FILE *outf;
+ URL_FILE *handle;
+ FILE *outf;
+
+ int nread;
+ char buffer[256];
+ const char *url;
+
+ if(argc < 2)
+ url="http://192.168.7.3/testfile";/* default to testurl */
+ else
+ url=argv[1];/* use passed url */
+
+ /* copy from url line by line with fgets */
+ outf=fopen("fgets.test","w+");
+ if(!outf) {
+ perror("couldn't open fgets output file\n");
+ return 1;
+ }
- int nread;
- char buffer[256];
- const char *url;
+ handle = url_fopen(url, "r");
+ if(!handle) {
+ printf("couldn't url_fopen() %s\n", url);
+ fclose(outf);
+ return 2;
+ }
- if(argc < 2)
- {
- url="http://192.168.7.3/testfile";/* default to testurl */
- }
- else
- {
- url=argv[1];/* use passed url */
- }
+ while(!url_feof(handle)) {
+ url_fgets(buffer,sizeof(buffer),handle);
+ fwrite(buffer,1,strlen(buffer),outf);
+ }
- /* copy from url line by line with fgets */
- outf=fopen("fgets.test","w+");
- if(!outf)
- {
- perror("couldn't open fgets output file\n");
- return 1;
- }
+ url_fclose(handle);
- handle = url_fopen(url, "r");
- if(!handle)
- {
- printf("couldn't url_fopen() %s\n", url);
- fclose(outf);
- return 2;
- }
+ fclose(outf);
- while(!url_feof(handle))
- {
- url_fgets(buffer,sizeof(buffer),handle);
- fwrite(buffer,1,strlen(buffer),outf);
- }
- url_fclose(handle);
+ /* Copy from url with fread */
+ outf=fopen("fread.test","w+");
+ if(!outf) {
+ perror("couldn't open fread output file\n");
+ return 1;
+ }
+ handle = url_fopen("testfile", "r");
+ if(!handle) {
+ printf("couldn't url_fopen() testfile\n");
fclose(outf);
+ return 2;
+ }
+ do {
+ nread = url_fread(buffer, 1,sizeof(buffer), handle);
+ fwrite(buffer,1,nread,outf);
+ } while(nread);
- /* Copy from url with fread */
- outf=fopen("fread.test","w+");
- if(!outf)
- {
- perror("couldn't open fread output file\n");
- return 1;
- }
+ url_fclose(handle);
- handle = url_fopen("testfile", "r");
- if(!handle) {
- printf("couldn't url_fopen() testfile\n");
- fclose(outf);
- return 2;
- }
+ fclose(outf);
- do {
- nread = url_fread(buffer, 1,sizeof(buffer), handle);
- fwrite(buffer,1,nread,outf);
- } while(nread);
- url_fclose(handle);
+ /* Test rewind */
+ outf=fopen("rewind.test","w+");
+ if(!outf) {
+ perror("couldn't open fread output file\n");
+ return 1;
+ }
+ handle = url_fopen("testfile", "r");
+ if(!handle) {
+ printf("couldn't url_fopen() testfile\n");
fclose(outf);
+ return 2;
+ }
+ nread = url_fread(buffer, 1,sizeof(buffer), handle);
+ fwrite(buffer,1,nread,outf);
+ url_rewind(handle);
- /* Test rewind */
- outf=fopen("rewind.test","w+");
- if(!outf)
- {
- perror("couldn't open fread output file\n");
- return 1;
- }
+ buffer[0]='\n';
+ fwrite(buffer,1,1,outf);
- handle = url_fopen("testfile", "r");
- if(!handle) {
- printf("couldn't url_fopen() testfile\n");
- fclose(outf);
- return 2;
- }
+ nread = url_fread(buffer, 1,sizeof(buffer), handle);
+ fwrite(buffer,1,nread,outf);
- nread = url_fread(buffer, 1,sizeof(buffer), handle);
- fwrite(buffer,1,nread,outf);
- url_rewind(handle);
- buffer[0]='\n';
- fwrite(buffer,1,1,outf);
+ url_fclose(handle);
- nread = url_fread(buffer, 1,sizeof(buffer), handle);
- fwrite(buffer,1,nread,outf);
-
-
- url_fclose(handle);
-
- fclose(outf);
+ fclose(outf);
- return 0;/* all done */
+ return 0;/* all done */
}
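The functions defined in this example (url_fopen, url_fread, url_fgets, url_fclose, url_rewind) are drop-in lookalikes for their stdio counterparts. As a rough usage sketch, assuming it is compiled together with fopen.c so that URL_FILE and the url_* declarations are visible, copying a URL to a local file might look like the helper below; the function name, URL, and output filename are placeholders, and error handling is kept minimal.

/* hypothetical helper, not part of the example: copy a URL (or a local
   path, since url_fopen falls back to fopen) into a plain stdio FILE */
static int copy_url_to_file(const char *url, const char *outname)
{
  URL_FILE *in;
  FILE *out;
  char buffer[4096];
  int nread;

  in = url_fopen(url, "r");    /* handles both URLs and local paths */
  if(!in)
    return 1;

  out = fopen(outname, "wb");
  if(!out) {
    url_fclose(in);
    return 1;
  }

  /* read through the wrapper, write with plain stdio, just like main() does */
  do {
    nread = url_fread(buffer, 1, sizeof(buffer), in);
    fwrite(buffer, 1, nread, out);
  } while(nread);

  url_fclose(in);
  fclose(out);
  return 0;
}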