about summary refs log tree commit diff
path: root/tests/unit
diff options
context:
space:
mode:
author    dfandrich <dan@coneharvesters.com>    2015-11-12 20:40:26 +0100
committer Dan Fandrich <dan@coneharvesters.com>    2015-11-12 22:49:32 +0100
commit    d7e352402cf21da54933392372f7a9232d3d7f23 (patch)
tree      9c0e27b29c0abdeac9ad952b88749db4dd84e598 /tests/unit
parent    278ea24a7a6461a97c3c5d9b37fc9fdd3a9802f4 (diff)
unit1603: Added unit tests for hash functions
Diffstat (limited to 'tests/unit')
-rw-r--r--    tests/unit/Makefile.inc    5
-rw-r--r--    tests/unit/unit1305.c      2
-rw-r--r--    tests/unit/unit1603.c      151
3 files changed, 155 insertions(+), 3 deletions(-)
diff --git a/tests/unit/Makefile.inc b/tests/unit/Makefile.inc
index 9073b34e6..056a8fbf2 100644
--- a/tests/unit/Makefile.inc
+++ b/tests/unit/Makefile.inc
@@ -7,7 +7,7 @@ UNITFILES = curlcheck.h \
# These are all unit test programs
UNITPROGS = unit1300 unit1301 unit1302 unit1303 unit1304 unit1305 unit1307 \
unit1308 unit1309 unit1330 unit1394 unit1395 unit1396 unit1397 unit1398 \
- unit1600 unit1601 unit1602
+ unit1600 unit1601 unit1602 unit1603
unit1300_SOURCES = unit1300.c $(UNITFILES)
unit1300_CPPFLAGS = $(AM_CPPFLAGS)
@@ -66,3 +66,6 @@ unit1601_CPPFLAGS = $(AM_CPPFLAGS)
unit1602_SOURCES = unit1602.c $(UNITFILES)
unit1602_CPPFLAGS = $(AM_CPPFLAGS)
+unit1603_SOURCES = unit1603.c $(UNITFILES)
+unit1603_CPPFLAGS = $(AM_CPPFLAGS)
+
diff --git a/tests/unit/unit1305.c b/tests/unit/unit1305.c
index 96913f1fa..9db488812 100644
--- a/tests/unit/unit1305.c
+++ b/tests/unit/unit1305.c
@@ -134,8 +134,6 @@ UNITTEST_START
abort_unless(nodep, "insertion into hash failed");
/* Freeing will now be done by Curl_hash_destroy */
data_node = NULL;
-
- /* To do: test retrieval, deletion, edge conditions */
}
UNITTEST_STOP
diff --git a/tests/unit/unit1603.c b/tests/unit/unit1603.c
new file mode 100644
index 000000000..27a08a73f
--- /dev/null
+++ b/tests/unit/unit1603.c
@@ -0,0 +1,151 @@
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at http://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+#include "curlcheck.h"
+
+#define ENABLE_CURLX_PRINTF
+#include "curlx.h"
+
+#include "hash.h"
+
+#include "memdebug.h" /* LAST include file */
+
+static struct curl_hash hash_static;
+static const int slots = 3;
+
+static void mydtor(void *p)
+{
+ /* Data are statically allocated */
+ (void)p; /* unused */
+}
+
+static CURLcode unit_setup( void )
+{
+ return Curl_hash_init(&hash_static, slots, Curl_hash_str,
+ Curl_str_key_compare, mydtor);
+}
+
+static void unit_stop( void )
+{
+ Curl_hash_destroy(&hash_static);
+}
+
+UNITTEST_START
+ char key1[] = "key1";
+ char key2[] = "key2b";
+ char key3[] = "key3";
+ char key4[] = "key4";
+ char notakey[] = "notakey";
+ char *nodep;
+ int rc;
+
+ /* Ensure the key1 hashes are as expected in order to test both hash
+ collisions and a full table */
+ fail_unless(Curl_hash_str(key1, strlen(key1), slots) == 1,
+ "hashes are not computed as expected");
+ fail_unless(Curl_hash_str(key2, strlen(key2), slots) == 0,
+ "hashes are not computed as expected");
+ fail_unless(Curl_hash_str(key3, strlen(key3), slots) == 2,
+ "hashes are not computed as expected");
+ fail_unless(Curl_hash_str(key4, strlen(key4), slots) == 1,
+ "hashes are not computed as expected");
+
+ nodep = Curl_hash_add(&hash_static, &key1, strlen(key1), &key1);
+ fail_unless(nodep, "insertion into hash failed");
+ nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
+ fail_unless(nodep == key1, "hash retrieval failed");
+
+ nodep = Curl_hash_add(&hash_static, &key2, strlen(key2), &key2);
+ fail_unless(nodep, "insertion into hash failed");
+ nodep = Curl_hash_pick(&hash_static, &key2, strlen(key2));
+ fail_unless(nodep == key2, "hash retrieval failed");
+
+ nodep = Curl_hash_add(&hash_static, &key3, strlen(key3), &key3);
+ fail_unless(nodep, "insertion into hash failed");
+ nodep = Curl_hash_pick(&hash_static, &key3, strlen(key3));
+ fail_unless(nodep == key3, "hash retrieval failed");
+
+ /* The fourth element exceeds the number of slots & collides */
+ nodep = Curl_hash_add(&hash_static, &key4, strlen(key4), &key4);
+ fail_unless(nodep, "insertion into hash failed");
+ nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
+ fail_unless(nodep == key4, "hash retrieval failed");
+
+ /* Make sure all elements are still accessible */
+ nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
+ fail_unless(nodep == key1, "hash retrieval failed");
+ nodep = Curl_hash_pick(&hash_static, &key2, strlen(key2));
+ fail_unless(nodep == key2, "hash retrieval failed");
+ nodep = Curl_hash_pick(&hash_static, &key3, strlen(key3));
+ fail_unless(nodep == key3, "hash retrieval failed");
+ nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
+ fail_unless(nodep == key4, "hash retrieval failed");
+
+ /* Delete the second of two entries in a bucket */
+ rc = Curl_hash_delete(&hash_static, &key4, strlen(key4));
+ fail_unless(rc == 0, "hash delete failed");
+ nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
+ fail_unless(nodep == key1, "hash retrieval failed");
+ nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
+ fail_unless(!nodep, "hash retrieval should have failed");
+
+ /* Insert that deleted node again */
+ nodep = Curl_hash_add(&hash_static, &key4, strlen(key4), &key4);
+ fail_unless(nodep, "insertion into hash failed");
+ nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
+ fail_unless(nodep == key4, "hash retrieval failed");
+
+ /* Delete the first of two entries in a bucket */
+ rc = Curl_hash_delete(&hash_static, &key1, strlen(key1));
+ fail_unless(rc == 0, "hash delete failed");
+ nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
+ fail_unless(!nodep, "hash retrieval should have failed");
+ nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
+ fail_unless(nodep == key4, "hash retrieval failed");
+
+ /* Delete the remaining one of two entries in a bucket */
+ rc = Curl_hash_delete(&hash_static, &key4, strlen(key4));
+ fail_unless(rc == 0, "hash delete failed");
+ nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
+ fail_unless(!nodep, "hash retrieval should have failed");
+ nodep = Curl_hash_pick(&hash_static, &key4, strlen(key4));
+ fail_unless(!nodep, "hash retrieval should have failed");
+
+ /* Delete an already deleted node */
+ rc = Curl_hash_delete(&hash_static, &key4, strlen(key4));
+ fail_unless(rc, "hash delete should have failed");
+
+ /* Replace an existing node */
+ nodep = Curl_hash_add(&hash_static, &key1, strlen(key1), &notakey);
+ fail_unless(nodep, "insertion into hash failed");
+ nodep = Curl_hash_pick(&hash_static, &key1, strlen(key1));
+ fail_unless(nodep == notakey, "hash retrieval failed");
+
+ /* Make sure all remaining elements are still accessible */
+ nodep = Curl_hash_pick(&hash_static, &key2, strlen(key2));
+ fail_unless(nodep == key2, "hash retrieval failed");
+ nodep = Curl_hash_pick(&hash_static, &key3, strlen(key3));
+ fail_unless(nodep == key3, "hash retrieval failed");
+
+ /* Clean up */
+ Curl_hash_clean(&hash_static);
+
+UNITTEST_STOP