author     Matt Turner <mattst88@gmail.com>            2020-04-19 16:01:22 -0700
committer  David Oberhollenzer <goliath@infraroot.at>  2020-04-22 14:48:46 +0200
commit     20756f4354f333005bc59a2d07593d5d1429d287 (patch)
tree       df1e0475927fa7b395c20ed9e777011f9f592b63
parent     566f67ce915f9175ed9075bb1d6c553249c9a426 (diff)
Import and use Mesa's hash table
With `perf record`/`perf report` I saw that 30% of the time was spent in
`sqfs_frag_table_find_tail_end` when running tar2sqfs on a tarball containing
the Gentoo ebuild repository (many thousands of small files). The reason was
the bucketing hash table in frag_table.c: too many elements in too few
buckets meant lots of walking over the linked lists.

This patch replaces that hash table with the hash table implementation from
Mesa. Its implementation is more complex (it is an open-addressing,
linear-reprobing hash table), but it is much better suited for the task.

On my 4c/8t Skylake, the time to run tar2sqfs drops from 7.5s to less than
3s. CPU usage increases from ~207% to ~356%, presumably indicating an
increase in available parallelism due to the removal of the hash table as a
bottleneck. The `perf report` profile with this patch shows that the time
spent in `sqfs_frag_table_find_tail_end` has dropped from ~30% to 0.01%.

Output from ministat:

    x before
    + after
        N           Min           Max        Median           Avg        Stddev
    x  20         7.476         7.685        7.5725        7.5615   0.051254268
    +  20          2.79         2.901         2.846       2.84475    0.03543842
    Difference at 95.0% confidence
            -4.71675 +/- 0.0282015
            -62.3785% +/- 0.241477%
            (Student's t, pooled s = 0.0440618)

I imported only the bits of the hash table implementation that were needed
for frag_table.c. Among the changes I made after importing are:

 - Removed usage of ralloc, Mesa's recursive memory allocator
 - Replaced ralloc -> malloc, ralloc_free -> free, rzalloc_array -> calloc
 - Removed mem_ctx parameters
 - Added free()s in the appropriate places (valgrind confirms there are no
   leaks)
 - Removed the _mesa_ prefix from function names

Fixes: #40
Signed-off-by: Matt Turner <mattst88@gmail.com>
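The new API keeps hashing out of the lookup path by letting callers pass a
precomputed hash. A minimal, self-contained sketch of that pre-hashed
pattern, using only the functions this commit adds (the `item` struct and
all values here are invented for illustration, not part of the patch):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include "lib/util/hash_table.h"

    struct item {
            uint32_t hash;   /* precomputed hash, stored in the key itself */
            uint32_t size;
            uint32_t index;
    };

    static uint32_t item_hash(const void *key)
    {
            return ((const struct item *)key)->hash;
    }

    static bool item_equals(const void *a, const void *b)
    {
            const struct item *a_ = a, *b_ = b;
            return a_->hash == b_->hash && a_->size == b_->size;
    }

    static void item_free(struct hash_entry *entry)
    {
            free(entry->data);
    }

    int main(void)
    {
            struct hash_table *ht = hash_table_create(item_hash, item_equals);
            struct item *it = calloc(1, sizeof(*it));
            struct item search;
            struct hash_entry *entry;

            assert(ht != NULL && it != NULL);
            it->hash = 0xdeadbeef;
            it->size = 4096;
            it->index = 7;

            /* key and data point at the same object, as frag_table.c does */
            hash_table_insert_pre_hashed(ht, it->hash, it, it);

            /* only the fields the equality callback reads need to be set */
            search.hash = 0xdeadbeef;
            search.size = 4096;
            entry = hash_table_search_pre_hashed(ht, search.hash, &search);
            assert(entry != NULL);
            printf("found index %u\n", ((struct item *)entry->data)->index);

            hash_table_destroy(ht, item_free);
            return 0;
    }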
-rw-r--r--  COPYING.md                    |   2
-rw-r--r--  lib/sqfs/Makemodule.am        |   1
-rw-r--r--  lib/sqfs/frag_table.c         | 116
-rw-r--r--  lib/util/fast_urem_by_const.h |  75
-rw-r--r--  lib/util/hash_table.c         | 415
-rw-r--r--  lib/util/hash_table.h         |  86
6 files changed, 623 insertions(+), 72 deletions(-)
diff --git a/COPYING.md b/COPYING.md
index 00d539a..b86177e 100644
--- a/COPYING.md
+++ b/COPYING.md
@@ -14,6 +14,8 @@ with the following exceptions:
 - `lib/sqfs/comp/zlib` contains files that have been extracted from the
   zlib compression library and modified. See `lib/sqfs/comp/zlib/README` for
   details and `licenses/zlib.txt` for the license text.
+ - `lib/util/hash_table.*` and `lib/util/fast_urem_by_const.h` contain a hash
+ table implementation (MIT license).
The rest of squashfs-tools-ng is released under the terms and conditions of
the **GNU General Public License version 3 or later**.
diff --git a/lib/sqfs/Makemodule.am b/lib/sqfs/Makemodule.am
index 942c37c..32b4c25 100644
--- a/lib/sqfs/Makemodule.am
+++ b/lib/sqfs/Makemodule.am
@@ -36,6 +36,7 @@ libsquashfs_la_LIBADD += $(ZSTD_LIBS) $(PTHREAD_LIBS)
# directly "import" stuff from libutil
libsquashfs_la_SOURCES += lib/util/str_table.c lib/util/alloc.c
libsquashfs_la_SOURCES += lib/util/xxhash.c
+libsquashfs_la_SOURCES += lib/util/hash_table.c lib/util/hash_table.h
if WINDOWS
libsquashfs_la_SOURCES += lib/sqfs/win32/io_file.c
diff --git a/lib/sqfs/frag_table.c b/lib/sqfs/frag_table.c
index 51164e6..59811bb 100644
--- a/lib/sqfs/frag_table.c
+++ b/lib/sqfs/frag_table.c
@@ -14,13 +14,12 @@
#include "sqfs/block.h"
#include "compat.h"
+#include "lib/util/hash_table.h"
+
#include <stdlib.h>
#include <string.h>
-#define NUM_BUCKETS (128)
-
-
typedef struct chunk_info_t {
struct chunk_info_t *next;
sqfs_u32 index;
@@ -37,23 +36,32 @@ struct sqfs_frag_table_t {
size_t used;
sqfs_fragment_t *table;
- chunk_info_t chunks[NUM_BUCKETS];
+ struct hash_table *ht;
};
+static uint32_t chunk_info_hash(const void *key)
+{
+ const chunk_info_t *chunk = key;
+ return chunk->hash;
+}
+
+static bool chunk_info_equals(const void *a, const void *b)
+{
+ const chunk_info_t *a_ = a, *b_ = b;
+ return a_->size == b_->size &&
+ a_->hash == b_->hash;
+}
+
+static void delete_function(struct hash_entry *entry)
+{
+ free(entry->data);
+}
+
static void frag_table_destroy(sqfs_object_t *obj)
{
sqfs_frag_table_t *tbl = (sqfs_frag_table_t *)obj;
- chunk_info_t *info;
- size_t i;
-
- for (i = 0; i < NUM_BUCKETS; ++i) {
- while (tbl->chunks[i].next != NULL) {
- info = tbl->chunks[i].next;
- tbl->chunks[i].next = info->next;
- free(info);
- }
- }
+ hash_table_destroy(tbl->ht, delete_function);
free(tbl->table);
free(tbl);
}
@@ -62,38 +70,15 @@ static sqfs_object_t *frag_table_copy(const sqfs_object_t *obj)
{
const sqfs_frag_table_t *tbl = (const sqfs_frag_table_t *)obj;
sqfs_frag_table_t *copy;
- const chunk_info_t *it;
- chunk_info_t *last;
- size_t i;
copy = malloc(sizeof(*copy));
if (copy == NULL)
return NULL;
memcpy(copy, tbl, sizeof(*tbl));
- for (i = 0; i < NUM_BUCKETS; ++i)
- copy->chunks[i].next = NULL;
-
- for (i = 0; i < NUM_BUCKETS; ++i) {
- last = &(copy->chunks[i]);
- it = tbl->chunks[i].next;
-
- while (it != NULL) {
- last->next = malloc(sizeof(*it));
- if (last->next == NULL)
- goto fail;
-
- memcpy(last->next, it, sizeof(*it));
- last = last->next;
- last->next = NULL;
- it = it->next;
- }
- }
+ copy->ht = hash_table_clone(tbl->ht);
return (sqfs_object_t *)copy;
-fail:
- frag_table_destroy((sqfs_object_t *)copy);
- return NULL;
}
sqfs_frag_table_t *sqfs_frag_table_create(sqfs_u32 flags)
@@ -107,6 +92,8 @@ sqfs_frag_table_t *sqfs_frag_table_create(sqfs_u32 flags)
if (tbl == NULL)
return NULL;
+ tbl->ht = hash_table_create(chunk_info_hash, chunk_info_equals);
+
((sqfs_object_t *)tbl)->copy = frag_table_copy;
((sqfs_object_t *)tbl)->destroy = frag_table_destroy;
return tbl;
@@ -267,30 +254,16 @@ int sqfs_frag_table_add_tail_end(sqfs_frag_table_t *tbl,
sqfs_u32 index, sqfs_u32 offset,
sqfs_u32 size, sqfs_u32 hash)
{
- size_t idx = hash % NUM_BUCKETS;
- chunk_info_t *new, *it;
-
- if (tbl->chunks[idx].size == 0 && tbl->chunks[idx].hash == 0) {
- tbl->chunks[idx].index = index;
- tbl->chunks[idx].offset = offset;
- tbl->chunks[idx].size = size;
- tbl->chunks[idx].hash = hash;
- } else {
- new = calloc(1, sizeof(*new));
- if (new == NULL)
- return SQFS_ERROR_ALLOC;
+ chunk_info_t *new = calloc(1, sizeof(*new));
+ if (new == NULL)
+ return SQFS_ERROR_ALLOC;
- new->index = index;
- new->offset = offset;
- new->size = size;
- new->hash = hash;
+ new->index = index;
+ new->offset = offset;
+ new->size = size;
+ new->hash = hash;
- it = &tbl->chunks[idx];
- while (it->next != NULL)
- it = it->next;
-
- it->next = new;
- }
+ hash_table_insert_pre_hashed(tbl->ht, new->hash, new, new);
return 0;
}
@@ -299,19 +272,18 @@ int sqfs_frag_table_find_tail_end(sqfs_frag_table_t *tbl,
sqfs_u32 hash, sqfs_u32 size,
sqfs_u32 *index, sqfs_u32 *offset)
{
- size_t idx = hash % NUM_BUCKETS;
- chunk_info_t *it;
+ struct hash_entry *entry;
+ chunk_info_t *chunk, search;
- if (tbl->chunks[idx].size == 0 && tbl->chunks[idx].hash == 0)
- return SQFS_ERROR_NO_ENTRY;
+ search.hash = hash;
+ search.size = size;
- for (it = &tbl->chunks[idx]; it != NULL; it = it->next) {
- if (it->hash == hash && it->size == size) {
- *index = it->index;
- *offset = it->offset;
- return 0;
- }
- }
+ entry = hash_table_search_pre_hashed(tbl->ht, hash, &search);
+ if (!entry)
+ return SQFS_ERROR_NO_ENTRY;
- return SQFS_ERROR_NO_ENTRY;
+ chunk = entry->data;
+ *index = chunk->index;
+ *offset = chunk->offset;
+ return 0;
}
diff --git a/lib/util/fast_urem_by_const.h b/lib/util/fast_urem_by_const.h
new file mode 100644
index 0000000..62b22a6
--- /dev/null
+++ b/lib/util/fast_urem_by_const.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright © 2010 Valve Software
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+
+/*
+ * Code for fast 32-bit unsigned remainder, based off of "Faster Remainder by
+ * Direct Computation: Applications to Compilers and Software Libraries,"
+ * available at https://arxiv.org/pdf/1902.01961.pdf.
+ *
+ * util_fast_urem32(n, d, REMAINDER_MAGIC(d)) returns the same thing as
+ * n % d for any unsigned n and d, however it compiles down to only a few
+ * multiplications, so it should be faster than plain uint32_t modulo if the
+ * same divisor is used many times.
+ */
+
+#define REMAINDER_MAGIC(divisor) \
+ ((uint64_t) ~0ull / (divisor) + 1)
+
+/*
+ * Get bits 64-96 of a 32x64-bit multiply. If __int128_t is available, we use
+ * it, which usually compiles down to one instruction on 64-bit architectures.
+ * Otherwise on 32-bit architectures we usually get four instructions (one
+ * 32x32->64 multiply, one 32x32->32 multiply, and one 64-bit add).
+ */
+
+static inline uint32_t
+_mul32by64_hi(uint32_t a, uint64_t b)
+{
+#ifdef HAVE_UINT128
+ return ((__uint128_t) b * a) >> 64;
+#else
+ /*
+ * Let b = b0 + 2^32 * b1. Then a * b = a * b0 + 2^32 * a * b1. We would
+ * have to do a 96-bit addition to get the full result, except that only
+ * one term has non-zero lower 32 bits, which means that to get the high 32
+ * bits, we only have to add the high 64 bits of each term. Unfortunately,
+ * we have to do the 64-bit addition in case the low 32 bits overflow.
+ */
+ uint32_t b0 = (uint32_t) b;
+ uint32_t b1 = b >> 32;
+ return ((((uint64_t) a * b0) >> 32) + (uint64_t) a * b1) >> 32;
+#endif
+}
+
+static inline uint32_t
+util_fast_urem32(uint32_t n, uint32_t d, uint64_t magic)
+{
+ uint64_t lowbits = magic * n;
+ uint32_t result = _mul32by64_hi(d, lowbits);
+ assert(result == n % d);
+ return result;
+}
+
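As the comment above states, `util_fast_urem32(n, d, REMAINDER_MAGIC(d))`
must agree with `n % d` for any unsigned n once the magic constant for d has
been precomputed. A minimal self-check sketch (not part of the patch; 151 is
one of the prime table sizes from hash_table.c):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include "lib/util/fast_urem_by_const.h"

    int main(void)
    {
            const uint32_t d = 151;                    /* divisor known up front */
            const uint64_t magic = REMAINDER_MAGIC(d); /* one division, done once */
            uint32_t n;

            /* util_fast_urem32() also asserts the equality internally */
            for (n = 0; n < 1000000; n++)
                    assert(util_fast_urem32(n, d, magic) == n % d);

            printf("fast remainder agrees with %% for n < 1000000\n");
            return 0;
    }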
diff --git a/lib/util/hash_table.c b/lib/util/hash_table.c
new file mode 100644
index 0000000..d2cce54
--- /dev/null
+++ b/lib/util/hash_table.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright © 2009,2012 Intel Corporation
+ * Copyright © 1988-2004 Keith Packard and Bart Massey.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the names of the authors
+ * or their institutions shall not be used in advertising or
+ * otherwise to promote the sale, use or other dealings in this
+ * Software without prior written authorization from the
+ * authors.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ */
+
+/**
+ * Implements an open-addressing, linear-reprobing hash table.
+ *
+ * For more information, see:
+ *
+ * http://cgit.freedesktop.org/~anholt/hash_table/tree/README
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "fast_urem_by_const.h"
+#include "hash_table.h"
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+static const uint32_t deleted_key_value;
+
+/**
+ * From Knuth -- a good choice for hash/rehash values is p, p-2 where
+ * p and p-2 are both prime. These tables are sized to have an extra 10%
+ * free to avoid exponential performance degradation as the hash table fills
+ */
+static const struct {
+ uint32_t max_entries, size, rehash;
+ uint64_t size_magic, rehash_magic;
+} hash_sizes[] = {
+#define ENTRY(max_entries, size, rehash) \
+ { max_entries, size, rehash, \
+ REMAINDER_MAGIC(size), REMAINDER_MAGIC(rehash) }
+
+ ENTRY(2, 5, 3 ),
+ ENTRY(4, 7, 5 ),
+ ENTRY(8, 13, 11 ),
+ ENTRY(16, 19, 17 ),
+ ENTRY(32, 43, 41 ),
+ ENTRY(64, 73, 71 ),
+ ENTRY(128, 151, 149 ),
+ ENTRY(256, 283, 281 ),
+ ENTRY(512, 571, 569 ),
+ ENTRY(1024, 1153, 1151 ),
+ ENTRY(2048, 2269, 2267 ),
+ ENTRY(4096, 4519, 4517 ),
+ ENTRY(8192, 9013, 9011 ),
+ ENTRY(16384, 18043, 18041 ),
+ ENTRY(32768, 36109, 36107 ),
+ ENTRY(65536, 72091, 72089 ),
+ ENTRY(131072, 144409, 144407 ),
+ ENTRY(262144, 288361, 288359 ),
+ ENTRY(524288, 576883, 576881 ),
+ ENTRY(1048576, 1153459, 1153457 ),
+ ENTRY(2097152, 2307163, 2307161 ),
+ ENTRY(4194304, 4613893, 4613891 ),
+ ENTRY(8388608, 9227641, 9227639 ),
+ ENTRY(16777216, 18455029, 18455027 ),
+ ENTRY(33554432, 36911011, 36911009 ),
+ ENTRY(67108864, 73819861, 73819859 ),
+ ENTRY(134217728, 147639589, 147639587 ),
+ ENTRY(268435456, 295279081, 295279079 ),
+ ENTRY(536870912, 590559793, 590559791 ),
+ ENTRY(1073741824, 1181116273, 1181116271 ),
+ ENTRY(2147483648ul, 2362232233ul, 2362232231ul )
+};
+
+static inline bool
+key_pointer_is_reserved(const struct hash_table *ht, const void *key)
+{
+ return key == NULL || key == ht->deleted_key;
+}
+
+static int
+entry_is_free(const struct hash_entry *entry)
+{
+ return entry->key == NULL;
+}
+
+static int
+entry_is_deleted(const struct hash_table *ht, struct hash_entry *entry)
+{
+ return entry->key == ht->deleted_key;
+}
+
+static int
+entry_is_present(const struct hash_table *ht, struct hash_entry *entry)
+{
+ return entry->key != NULL && entry->key != ht->deleted_key;
+}
+
+static bool
+hash_table_init(struct hash_table *ht,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b))
+{
+ ht->size_index = 0;
+ ht->size = hash_sizes[ht->size_index].size;
+ ht->rehash = hash_sizes[ht->size_index].rehash;
+ ht->size_magic = hash_sizes[ht->size_index].size_magic;
+ ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
+ ht->max_entries = hash_sizes[ht->size_index].max_entries;
+ ht->key_hash_function = key_hash_function;
+ ht->key_equals_function = key_equals_function;
+ ht->table = calloc(sizeof(struct hash_entry), ht->size);
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+ ht->deleted_key = &deleted_key_value;
+
+ return ht->table != NULL;
+}
+
+struct hash_table *
+hash_table_create(uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b))
+{
+ struct hash_table *ht;
+
+ ht = malloc(sizeof(struct hash_table));
+ if (ht == NULL)
+ return NULL;
+
+ if (!hash_table_init(ht, key_hash_function, key_equals_function)) {
+ free(ht);
+ return NULL;
+ }
+
+ return ht;
+}
+
+struct hash_table *
+hash_table_clone(struct hash_table *src)
+{
+ struct hash_table *ht;
+
+ ht = malloc(sizeof(struct hash_table));
+ if (ht == NULL)
+ return NULL;
+
+ memcpy(ht, src, sizeof(struct hash_table));
+
+ ht->table = calloc(sizeof(struct hash_entry), ht->size);
+ if (ht->table == NULL) {
+ free(ht);
+ return NULL;
+ }
+
+ memcpy(ht->table, src->table, ht->size * sizeof(struct hash_entry));
+
+ return ht;
+}
+
+/**
+ * Frees the given hash table.
+ */
+void
+hash_table_destroy(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry))
+{
+ if (!ht)
+ return;
+
+ if (delete_function) {
+ hash_table_foreach(ht, entry) {
+ delete_function(entry);
+ }
+ }
+ free(ht->table);
+ free(ht);
+}
+
+static struct hash_entry *
+hash_table_search(struct hash_table *ht, uint32_t hash, const void *key)
+{
+ assert(!key_pointer_is_reserved(ht, key));
+
+ uint32_t size = ht->size;
+ uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic);
+ uint32_t hash_address = start_hash_address;
+
+ do {
+ struct hash_entry *entry = ht->table + hash_address;
+
+ if (entry_is_free(entry)) {
+ return NULL;
+ } else if (entry_is_present(ht, entry) && entry->hash == hash) {
+ if (ht->key_equals_function(key, entry->key)) {
+ return entry;
+ }
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (hash_address != start_hash_address);
+
+ return NULL;
+}
+
+/**
+ * Finds a hash table entry with the given key and hash of that key.
+ *
+ * Returns NULL if no entry is found. Note that the data pointer may be
+ * modified by the user.
+ */
+struct hash_entry *
+hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key)
+{
+ assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
+ return hash_table_search(ht, hash, key);
+}
+
+static void
+hash_table_insert_rehash(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data)
+{
+ uint32_t size = ht->size;
+ uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic);
+ uint32_t hash_address = start_hash_address;
+ do {
+ struct hash_entry *entry = ht->table + hash_address;
+
+ if (entry->key == NULL) {
+ entry->hash = hash;
+ entry->key = key;
+ entry->data = data;
+ return;
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (true);
+}
+
+static void
+hash_table_rehash(struct hash_table *ht, unsigned new_size_index)
+{
+ struct hash_table old_ht;
+ struct hash_entry *table;
+
+ if (new_size_index >= ARRAY_SIZE(hash_sizes))
+ return;
+
+ table = calloc(sizeof(struct hash_entry), hash_sizes[new_size_index].size);
+ if (table == NULL)
+ return;
+
+ old_ht = *ht;
+
+ ht->table = table;
+ ht->size_index = new_size_index;
+ ht->size = hash_sizes[ht->size_index].size;
+ ht->rehash = hash_sizes[ht->size_index].rehash;
+ ht->size_magic = hash_sizes[ht->size_index].size_magic;
+ ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
+ ht->max_entries = hash_sizes[ht->size_index].max_entries;
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+
+ hash_table_foreach(&old_ht, entry) {
+ hash_table_insert_rehash(ht, entry->hash, entry->key, entry->data);
+ }
+
+ ht->entries = old_ht.entries;
+
+ free(old_ht.table);
+}
+
+static struct hash_entry *
+hash_table_insert(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data)
+{
+ struct hash_entry *available_entry = NULL;
+
+ assert(!key_pointer_is_reserved(ht, key));
+
+ if (ht->entries >= ht->max_entries) {
+ hash_table_rehash(ht, ht->size_index + 1);
+ } else if (ht->deleted_entries + ht->entries >= ht->max_entries) {
+ hash_table_rehash(ht, ht->size_index);
+ }
+
+ uint32_t size = ht->size;
+ uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic);
+ uint32_t hash_address = start_hash_address;
+ do {
+ struct hash_entry *entry = ht->table + hash_address;
+
+ if (!entry_is_present(ht, entry)) {
+ /* Stash the first available entry we find */
+ if (available_entry == NULL)
+ available_entry = entry;
+ if (entry_is_free(entry))
+ break;
+ }
+
+ /* Implement replacement when another insert happens
+ * with a matching key. This is a relatively common
+ * feature of hash tables, with the alternative
+ * generally being "insert the new value as well, and
+ * return it first when the key is searched for".
+ *
+ * Note that the hash table doesn't have a delete
+ * callback. If freeing of old data pointers is
+ * required to avoid memory leaks, perform a search
+ * before inserting.
+ */
+ if (!entry_is_deleted(ht, entry) &&
+ entry->hash == hash &&
+ ht->key_equals_function(key, entry->key)) {
+ entry->key = key;
+ entry->data = data;
+ return entry;
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (hash_address != start_hash_address);
+
+ if (available_entry) {
+ if (entry_is_deleted(ht, available_entry))
+ ht->deleted_entries--;
+ available_entry->hash = hash;
+ available_entry->key = key;
+ available_entry->data = data;
+ ht->entries++;
+ return available_entry;
+ }
+
+ /* We could hit here if a required resize failed. An unchecked-malloc
+ * application could ignore this result.
+ */
+ return NULL;
+}
+
+/**
+ * Inserts the key with the given hash into the table.
+ *
+ * Note that insertion may rearrange the table on a resize or rehash,
+ * so previously found hash_entries are no longer valid after this function.
+ */
+struct hash_entry *
+hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data)
+{
+ assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
+ return hash_table_insert(ht, hash, key, data);
+}
+
+/**
+ * This function is an iterator over the hash table.
+ *
+ * Pass in NULL for the first entry, as in the start of a for loop. Note that
+ * an iteration over the table is O(table_size) not O(entries).
+ */
+struct hash_entry *
+hash_table_next_entry(struct hash_table *ht,
+ struct hash_entry *entry)
+{
+ if (entry == NULL)
+ entry = ht->table;
+ else
+ entry = entry + 1;
+
+ for (; entry != ht->table + ht->size; entry++) {
+ if (entry_is_present(ht, entry)) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
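Search and insert above share one probing scheme: the start slot is
`hash % size` and the stride is `1 + hash % rehash`, both computed with the
fast-remainder helper. Because `size` and `rehash` come from the prime pairs
in the table above, the stride never shares a factor with the table size, so
the probe sequence visits every slot exactly once before wrapping. A
standalone sketch of that walk, with toy values invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Walk the double-hashing probe sequence used by hash_table.c for the
     * smallest Knuth prime pair large enough to show wrapping (size = 13,
     * rehash = 11). All 13 slots are printed once before the walk returns
     * to its starting slot. */
    int main(void)
    {
            const uint32_t size = 13, rehash = 11;
            const uint32_t hash = 0x9e3779b9;      /* arbitrary example hash */
            uint32_t start = hash % size;          /* start slot: 11 */
            uint32_t step = 1 + hash % rehash;     /* stride: 8 */
            uint32_t addr = start;

            do {
                    printf("probe slot %u\n", addr);
                    addr += step;
                    if (addr >= size)
                            addr -= size;
            } while (addr != start);

            return 0;
    }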
diff --git a/lib/util/hash_table.h b/lib/util/hash_table.h
new file mode 100644
index 0000000..ccbd9c0
--- /dev/null
+++ b/lib/util/hash_table.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright © 2009,2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _HASH_TABLE_H
+#define _HASH_TABLE_H
+
+#include <stdlib.h>
+#include <inttypes.h>
+#include <stdbool.h>
+
+struct hash_entry {
+ uint32_t hash;
+ const void *key;
+ void *data;
+};
+
+struct hash_table {
+ struct hash_entry *table;
+ uint32_t (*key_hash_function)(const void *key);
+ bool (*key_equals_function)(const void *a, const void *b);
+ const void *deleted_key;
+ uint32_t size;
+ uint32_t rehash;
+ uint64_t size_magic;
+ uint64_t rehash_magic;
+ uint32_t max_entries;
+ uint32_t size_index;
+ uint32_t entries;
+ uint32_t deleted_entries;
+};
+
+struct hash_table *
+hash_table_create(uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b));
+
+struct hash_table *
+hash_table_clone(struct hash_table *src);
+void hash_table_destroy(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry));
+
+struct hash_entry *
+hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data);
+struct hash_entry *
+hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key);
+
+struct hash_entry *hash_table_next_entry(struct hash_table *ht,
+ struct hash_entry *entry);
+
+/**
+ * This foreach function is safe against deletion (which just replaces
+ * an entry's data with the deleted marker), but not against insertion
+ * (which may rehash the table, making entry a dangling pointer).
+ */
+#define hash_table_foreach(ht, entry) \
+ for (struct hash_entry *entry = hash_table_next_entry(ht, NULL); \
+ entry != NULL; \
+ entry = hash_table_next_entry(ht, entry))
+
+#endif /* _HASH_TABLE_H */
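A short usage sketch for the `hash_table_foreach` macro above, walking
entries the way `frag_table_destroy` does via its delete callback. The
`dump_and_count` helper and its setup are hypothetical, not part of the
patch:

    #include <stdio.h>
    #include "lib/util/hash_table.h"

    /* Assumes `ht` maps pre-hashed keys to heap-allocated payloads, as in
     * frag_table.c. hash_table_foreach declares `entry` itself, and the
     * scan is O(table size), not O(entries), since every slot is checked. */
    static void dump_and_count(struct hash_table *ht)
    {
            uint32_t count = 0;

            hash_table_foreach(ht, entry) {
                    printf("hash %08" PRIx32 " -> %p\n",
                           entry->hash, entry->data);
                    count++;
            }
            printf("%" PRIu32 " live entries\n", count);
    }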