summaryrefslogtreecommitdiffstats
path: root/src/hashtable.c
diff options
context:
space:
mode:
author: Nikias Bassen 2016-05-12 02:32:55 +0200
committer: Nikias Bassen 2016-05-12 02:32:55 +0200
commit: 2af7318239c59555869d018bc355fe0e21d900c6 (patch)
tree3b366f10c04d305c9bd72e31e28ca62596ba07dd /src/hashtable.c
parent6ab7e301f1854fd18891ddfeaa64e7485be990ba (diff)
downloadlibplist-2af7318239c59555869d018bc355fe0e21d900c6.tar.gz
libplist-2af7318239c59555869d018bc355fe0e21d900c6.tar.bz2
bplist: Speed up plist_to_bin conversion for large plists
Using a better hashing algorithm and a larger hash table the conversion is A LOT faster when processing large plists. Thanks to Xiao Deng for reporting this issue and suggesting a fix.
Diffstat (limited to 'src/hashtable.c')
-rw-r--r-- src/hashtable.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/hashtable.c b/src/hashtable.c
index 08ff934..3568f3c 100644
--- a/src/hashtable.c
+++ b/src/hashtable.c
@@ -24,7 +24,7 @@ hashtable_t* hash_table_new(hash_func_t hash_func, compare_func_t compare_func)
{
hashtable_t* ht = (hashtable_t*)malloc(sizeof(hashtable_t));
int i;
- for (i = 0; i < 256; i++) {
+ for (i = 0; i < 4096; i++) {
ht->entries[i] = NULL;
}
ht->count = 0;
@@ -38,7 +38,7 @@ void hash_table_destroy(hashtable_t *ht)
if (!ht) return;
int i = 0;
- for (i = 0; i < 256; i++) {
+ for (i = 0; i < 4096; i++) {
if (ht->entries[i]) {
hashentry_t* e = ht->entries[i];
while (e) {
@@ -58,7 +58,7 @@ void hash_table_insert(hashtable_t* ht, void *key, void *value)
unsigned int hash = ht->hash_func(key);
- int idx0 = hash & 0xFF;
+ int idx0 = hash & 0xFFF;
// get the idx0 list
hashentry_t* e = ht->entries[idx0];
@@ -93,7 +93,7 @@ void* hash_table_lookup(hashtable_t* ht, void *key)
if (!ht || !key) return NULL;
unsigned int hash = ht->hash_func(key);
- int idx0 = hash & 0xFF;
+ int idx0 = hash & 0xFFF;
hashentry_t* e = ht->entries[idx0];
while (e) {