From e942ed9b277cd8058e9ab1b5a762ee399f5231f0 Mon Sep 17 00:00:00 2001
From: David Oberhollenzer
Date: Thu, 21 May 2020 19:50:19 +0200
Subject: hash table: switch to sqfs_* types, mark functions as hidden

Signed-off-by: David Oberhollenzer
---
 lib/util/fast_urem_by_const.h | 24 ++++++++++++-----------
 lib/util/hash_table.c         | 44 +++++++++++++++++++++----------------------
 2 files changed, 35 insertions(+), 33 deletions(-)

diff --git a/lib/util/fast_urem_by_const.h b/lib/util/fast_urem_by_const.h
index f5b0664..073f9e0 100644
--- a/lib/util/fast_urem_by_const.h
+++ b/lib/util/fast_urem_by_const.h
@@ -21,6 +21,8 @@
  * IN THE SOFTWARE.
  */
 
+#include "sqfs/predef.h"
+
 #include <stdint.h>
 #include <assert.h>
 
@@ -31,12 +33,12 @@
  *
  * util_fast_urem32(n, d, REMAINDER_MAGIC(d)) returns the same thing as
  * n % d for any unsigned n and d, however it compiles down to only a few
- * multiplications, so it should be faster than plain uint32_t modulo if the
+ * multiplications, so it should be faster than plain sqfs_u32 modulo if the
 * same divisor is used many times.
  */
 
 #define REMAINDER_MAGIC(divisor) \
-   ((uint64_t) ~0ull / (divisor) + 1)
+   ((sqfs_u64) ~0ull / (divisor) + 1)
 
 /*
  * Get bits 64-96 of a 32x64-bit multiply. If __int128_t is available, we use
@@ -45,8 +47,8 @@
 * 32x32->64 multiply, one 32x32->32 multiply, and one 64-bit add).
  */
 
-static inline uint32_t
-_mul32by64_hi(uint32_t a, uint64_t b)
+static inline sqfs_u32
+_mul32by64_hi(sqfs_u32 a, sqfs_u64 b)
 {
 #if __SIZEOF_INT128__ == 16
    return ((__uint128_t) b * a) >> 64;
@@ -58,17 +60,17 @@ _mul32by64_hi(uint32_t a, uint64_t b)
  * bits, we only have to add the high 64 bits of each term. Unfortunately,
  * we have to do the 64-bit addition in case the low 32 bits overflow.
  */
-   uint32_t b0 = (uint32_t) b;
-   uint32_t b1 = b >> 32;
-   return ((((uint64_t) a * b0) >> 32) + (uint64_t) a * b1) >> 32;
+   sqfs_u32 b0 = (sqfs_u32) b;
+   sqfs_u32 b1 = b >> 32;
+   return ((((sqfs_u64) a * b0) >> 32) + (sqfs_u64) a * b1) >> 32;
 #endif
 }
 
-static inline uint32_t
-util_fast_urem32(uint32_t n, uint32_t d, uint64_t magic)
+static inline sqfs_u32
+util_fast_urem32(sqfs_u32 n, sqfs_u32 d, sqfs_u64 magic)
 {
-   uint64_t lowbits = magic * n;
-   uint32_t result = _mul32by64_hi(d, lowbits);
+   sqfs_u64 lowbits = magic * n;
+   sqfs_u32 result = _mul32by64_hi(d, lowbits);
    assert(result == n % d);
    return result;
 }
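The REMAINDER_MAGIC()/util_fast_urem32() pair above implements the well-known
trick of replacing "n % d" for a fixed divisor d with a precomputed 64-bit
magic constant and two multiplications. Below is a minimal standalone sketch
of the same idea; it uses plain stdint types so it compiles outside the tree,
and the function names merely mirror the patched header, so treat everything
in it as illustrative rather than as part of the patch:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* floor((2^64 - 1) / d) + 1, same value as REMAINDER_MAGIC(d) above. */
static uint64_t remainder_magic(uint32_t d)
{
   return (uint64_t) ~0ull / d + 1;
}

/* Bits 64..95 of the 96-bit product a * b (the portable fallback path
 * from the patch, for when __int128_t is unavailable). */
static uint32_t mul32by64_hi(uint32_t a, uint64_t b)
{
   uint32_t b0 = (uint32_t) b;
   uint32_t b1 = b >> 32;
   return ((((uint64_t) a * b0) >> 32) + (uint64_t) a * b1) >> 32;
}

static uint32_t fast_urem32(uint32_t n, uint32_t d, uint64_t magic)
{
   uint64_t lowbits = magic * n;   /* == n * magic mod 2^64 */
   return mul32by64_hi(d, lowbits);
}

int main(void)
{
   const uint32_t d = 127;                    /* fixed divisor */
   const uint64_t magic = remainder_magic(d); /* computed once */
   uint32_t n;

   for (n = 0; n < 100000; ++n)
      assert(fast_urem32(n, d, magic) == n % d);

   printf("1000003 %% %u = %u\n", (unsigned) d,
          (unsigned) fast_urem32(1000003, d, magic));
   return 0;
}

The magic constant depends only on the divisor, so a caller such as the hash
table below computes it once per table size (size_magic, rehash_magic) and
amortizes that cost over every probe.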
diff --git a/lib/util/hash_table.c b/lib/util/hash_table.c
index d2cce54..63882f3 100644
--- a/lib/util/hash_table.c
+++ b/lib/util/hash_table.c
@@ -49,7 +49,7 @@
 
 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
-static const uint32_t deleted_key_value;
+static const sqfs_u32 deleted_key_value;
 
 /**
  * From Knuth -- a good choice for hash/rehash values is p, p-2 where
@@ -57,8 +57,8 @@ static const uint32_t deleted_key_value;
  * free to avoid exponential performance degradation as the hash table fills
  */
 static const struct {
-   uint32_t max_entries, size, rehash;
-   uint64_t size_magic, rehash_magic;
+   sqfs_u32 max_entries, size, rehash;
+   sqfs_u64 size_magic, rehash_magic;
 } hash_sizes[] = {
 #define ENTRY(max_entries, size, rehash) \
    { max_entries, size, rehash, \
@@ -123,7 +123,7 @@ entry_is_present(const struct hash_table *ht, struct hash_entry *entry)
 
 static bool
 hash_table_init(struct hash_table *ht,
-                uint32_t (*key_hash_function)(const void *key),
+                sqfs_u32 (*key_hash_function)(const void *key),
                 bool (*key_equals_function)(const void *a,
                                             const void *b))
 {
@@ -144,7 +144,7 @@ hash_table_init(struct hash_table *ht,
 }
 
 struct hash_table *
-hash_table_create(uint32_t (*key_hash_function)(const void *key),
+hash_table_create(sqfs_u32 (*key_hash_function)(const void *key),
                   bool (*key_equals_function)(const void *a,
                                               const void *b))
 {
@@ -204,15 +204,15 @@ hash_table_destroy(struct hash_table *ht,
 }
 
 static struct hash_entry *
-hash_table_search(struct hash_table *ht, uint32_t hash, const void *key)
+hash_table_search(struct hash_table *ht, sqfs_u32 hash, const void *key)
 {
    assert(!key_pointer_is_reserved(ht, key));
 
-   uint32_t size = ht->size;
-   uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
-   uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+   sqfs_u32 size = ht->size;
+   sqfs_u32 start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+   sqfs_u32 double_hash = 1 + util_fast_urem32(hash, ht->rehash,
                                                ht->rehash_magic);
-   uint32_t hash_address = start_hash_address;
+   sqfs_u32 hash_address = start_hash_address;
 
    do {
       struct hash_entry *entry = ht->table + hash_address;
@@ -240,7 +240,7 @@ hash_table_search(struct hash_table *ht, uint32_t hash, const void *key)
  * modified by the user.
  */
 struct hash_entry *
-hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
+hash_table_search_pre_hashed(struct hash_table *ht, sqfs_u32 hash,
                              const void *key)
 {
    assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
@@ -248,14 +248,14 @@ hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
 }
 
 static void
-hash_table_insert_rehash(struct hash_table *ht, uint32_t hash,
+hash_table_insert_rehash(struct hash_table *ht, sqfs_u32 hash,
                          const void *key, void *data)
 {
-   uint32_t size = ht->size;
-   uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
-   uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+   sqfs_u32 size = ht->size;
+   sqfs_u32 start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+   sqfs_u32 double_hash = 1 + util_fast_urem32(hash, ht->rehash,
                                                ht->rehash_magic);
-   uint32_t hash_address = start_hash_address;
+   sqfs_u32 hash_address = start_hash_address;
 
    do {
       struct hash_entry *entry = ht->table + hash_address;
@@ -307,7 +307,7 @@ hash_table_rehash(struct hash_table *ht, unsigned new_size_index)
 }
 
 static struct hash_entry *
-hash_table_insert(struct hash_table *ht, uint32_t hash,
+hash_table_insert(struct hash_table *ht, sqfs_u32 hash,
                   const void *key, void *data)
 {
    struct hash_entry *available_entry = NULL;
@@ -320,11 +320,11 @@ hash_table_insert(struct hash_table *ht, uint32_t hash,
       hash_table_rehash(ht, ht->size_index);
    }
 
-   uint32_t size = ht->size;
-   uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
-   uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+   sqfs_u32 size = ht->size;
+   sqfs_u32 start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+   sqfs_u32 double_hash = 1 + util_fast_urem32(hash, ht->rehash,
                                                ht->rehash_magic);
-   uint32_t hash_address = start_hash_address;
+   sqfs_u32 hash_address = start_hash_address;
 
    do {
       struct hash_entry *entry = ht->table + hash_address;
@@ -383,7 +383,7 @@ hash_table_insert(struct hash_table *ht, uint32_t hash,
  * so previously found hash_entries are no longer valid after this function.
  */
 struct hash_entry *
-hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
+hash_table_insert_pre_hashed(struct hash_table *ht, sqfs_u32 hash,
                              const void *key, void *data)
 {
    assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
-- 
cgit v1.2.3
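For context, the patched table uses open addressing with double hashing: the
primary hash reduced modulo the table size picks the start slot, and
1 + hash % rehash picks the probe stride, with both reductions done through
util_fast_urem32() and size/rehash being the twin primes p and p-2 from the
Knuth comment, so every stride is coprime to the table size. The sketch below
shows how a caller might exercise the public API as it appears in this diff;
the "hash_table.h" header name, the .data field of struct hash_entry and the
delete-function parameter of hash_table_destroy() are assumptions inferred
from the hunk context, not confirmed by the patch:

/* Hypothetical usage sketch -- header name and some signatures are
 * assumed from the hunk context above, not shown in this diff. */
#include "hash_table.h"
#include "sqfs/predef.h"

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

/* FNV-1a, an arbitrary example hash; any sqfs_u32 hash works. */
static sqfs_u32 str_hash(const void *key)
{
   const unsigned char *s = key;
   sqfs_u32 h = 2166136261u;

   while (*s != '\0') {
      h ^= *s++;
      h *= 16777619u;
   }
   return h;
}

static bool str_equals(const void *a, const void *b)
{
   return strcmp(a, b) == 0;
}

int main(void)
{
   struct hash_table *ht = hash_table_create(str_hash, str_equals);
   struct hash_entry *ent;
   sqfs_u32 h;

   if (ht == NULL)
      return 1;

   /* Hash once, then reuse the value for insert and lookup. */
   h = str_hash("squashfs");
   hash_table_insert_pre_hashed(ht, h, "squashfs", "a value");

   ent = hash_table_search_pre_hashed(ht, h, "squashfs");
   if (ent != NULL)
      printf("found: %s\n", (const char *) ent->data);

   hash_table_destroy(ht, NULL); /* assumed: NULL = no per-entry cleanup */
   return 0;
}

The *_pre_hashed entry points exist so that a caller who already has the hash
(for instance, cached alongside the key) never pays for hashing twice; the
debug assert in the patch cross-checks the supplied hash against
ht->key_hash_function when one was registered.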