author    David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2022-03-10 23:59:29 +0100
committer David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2022-03-11 12:32:50 +0100
commit    2a3f02fd36a9152bcafaa05bddebbdb4bd2f41e6 (patch)
tree      cfc11fa7f1adf7259219214a0bde33554e87d378 /lib/util
parent    82f83c9515aaf99d12f6aa101c4d7b7463850e8b (diff)
More defensive programming in mem_pool_allocate
Abort and retry in situations that should logically _never_ _ever_ happen.

Signed-off-by: David Oberhollenzer <david.oberhollenzer@sigma-star.at>
Diffstat (limited to 'lib/util')
-rw-r--r--  lib/util/mempool.c | 12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/lib/util/mempool.c b/lib/util/mempool.c
index 9b2d905..5191fa2 100644
--- a/lib/util/mempool.c
+++ b/lib/util/mempool.c
@@ -142,7 +142,7 @@ void *mem_pool_allocate(mem_pool_t *mem)
 	size_t idx, i, j;
 	void *ptr = NULL;
 	pool_t *it;
-
+retry_pool:
 	for (it = mem->pool_list; it != NULL; it = it->next) {
 		if (it->obj_free > 0)
 			break;
@@ -162,11 +162,21 @@ void *mem_pool_allocate(mem_pool_t *mem)
 			break;
 	}
 
+	if (i == mem->bitmap_count) {
+		it->obj_free = 0;
+		goto retry_pool;
+	}
+
 	for (j = 0; j < (sizeof(it->bitmap[i]) * CHAR_BIT); ++j) {
 		if (!(it->bitmap[i] & (1UL << j)))
 			break;
 	}
 
+	if (j == (sizeof(it->bitmap[i]) * CHAR_BIT)) {
+		it->obj_free = 0;
+		goto retry_pool;
+	}
+
 	idx = i * sizeof(unsigned int) * CHAR_BIT + j;
 	ptr = it->data + idx * mem->obj_size;
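To illustrate the pattern outside the diff, here is a minimal stand-alone sketch of the same abort-and-retry idea: if the cached free-object count says a slot exists but the bitmap scan finds none, reset the count and retry instead of computing an out-of-range index. The pool_t layout, BITMAP_COUNT/OBJ_COUNT constants, the pool_allocate helper, and the fixed 32-byte object size below are simplified assumptions for the example, not libutil's actual mem_pool_t/pool_t definitions; only the bitmap scan and the defensive checks mirror the patched function.

/* Stand-alone sketch of the defensive bitmap-scan allocator (assumptions,
 * not the real libutil types). Compile with: cc -std=c99 sketch.c */
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define BITMAP_COUNT 4
#define OBJ_COUNT    (BITMAP_COUNT * sizeof(unsigned long) * CHAR_BIT)
#define OBJ_SIZE     32

typedef struct {
	unsigned long bitmap[BITMAP_COUNT];    /* 1 bit per object, set = in use */
	size_t obj_free;                       /* cached count of free objects */
	unsigned char data[OBJ_COUNT * OBJ_SIZE]; /* backing storage */
} pool_t;

static void *pool_allocate(pool_t *it)
{
	size_t i, j, idx;

retry_pool:
	if (it->obj_free == 0)
		return NULL;    /* the real code would allocate a new pool here */

	/* find a bitmap word that still has a clear bit */
	for (i = 0; i < BITMAP_COUNT; ++i) {
		if (it->bitmap[i] < ~0UL)
			break;
	}

	/* Defensive check: obj_free claimed a free slot, but every word is
	 * full. Fix the stale counter and retry instead of indexing past the
	 * end of the bitmap array. */
	if (i == BITMAP_COUNT) {
		it->obj_free = 0;
		goto retry_pool;
	}

	/* find the first clear bit inside that word */
	for (j = 0; j < sizeof(it->bitmap[i]) * CHAR_BIT; ++j) {
		if (!(it->bitmap[i] & (1UL << j)))
			break;
	}

	/* Same idea one level down: logically unreachable, but recover
	 * instead of computing a bogus index. */
	if (j == sizeof(it->bitmap[i]) * CHAR_BIT) {
		it->obj_free = 0;
		goto retry_pool;
	}

	idx = i * sizeof(it->bitmap[i]) * CHAR_BIT + j;
	it->bitmap[i] |= 1UL << j;
	it->obj_free -= 1;
	return it->data + idx * OBJ_SIZE;
}

int main(void)
{
	pool_t pool;

	memset(&pool, 0, sizeof(pool));
	pool.obj_free = OBJ_COUNT;

	void *a = pool_allocate(&pool);
	void *b = pool_allocate(&pool);
	printf("allocated %p and %p\n", a, b);
	return 0;
}

In this sketch the two checks can only fire if obj_free gets out of sync with the bitmaps; the commit treats that as a "should never happen" state and recovers by zeroing the counter and falling back to the normal allocation path rather than reading past the bitmap.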