author    David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2019-09-17 14:29:29 +0200
committer David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2019-09-20 03:18:47 +0200
commit    9d7d0a84a2017af2e70cc0f33bfbce0b59470e62 (patch)
tree      f06ddabcebc1210d3764ada396284b46cebedc8d /lib/fstree/optimize_unpack_order.c
parent    544f8f6dfd2f61fd1d2ab7a9a955e63d4b416dcc (diff)
Remove parallel unpacking
Parallel unpacking didn't really improve speed that much. Sorting the files for an optimized unpack order actually improved speed far more than the parallel unpacker did. Furthermore, the fork-based parallel unpacker was fairly messy to begin with.

Signed-off-by: David Oberhollenzer <david.oberhollenzer@sigma-star.at>
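For context, the signature change in the diff below drops the per-job output array in favour of a single sorted list. A rough before/after sketch of what this means for a caller; the spawn_unpack_job() and unpack_file() helpers are hypothetical stand-ins, not part of this commit:

    file_info_t *it, *lists[MAX_JOBS];
    size_t i;

    /* Before: the caller received one list per unpack job. */
    optimize_unpack_order(fs, num_jobs, lists);
    for (i = 0; i < num_jobs; ++i)
        spawn_unpack_job(lists[i]);  /* hypothetical fork-based worker */

    /* After: one sorted list, unpacked sequentially. */
    for (it = optimize_unpack_order(fs); it != NULL; it = it->next)
        unpack_file(it);             /* hypothetical per-file unpack step */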
Diffstat (limited to 'lib/fstree/optimize_unpack_order.c')
-rw-r--r--  lib/fstree/optimize_unpack_order.c  55
1 file changed, 2 insertions(+), 53 deletions(-)
diff --git a/lib/fstree/optimize_unpack_order.c b/lib/fstree/optimize_unpack_order.c
index 51576bd..972d4b3 100644
--- a/lib/fstree/optimize_unpack_order.c
+++ b/lib/fstree/optimize_unpack_order.c
@@ -106,65 +106,14 @@ static file_info_t *list_sort(const fstree_t *fs, file_info_t *head)
return merge(fs, list_sort(fs, head), list_sort(fs, half));
}
-static file_info_t *split_list(file_info_t *list, uint64_t threashold)
-{
- file_info_t *it, *new = NULL;
- uint64_t size = 0;
-
- for (it = list; it != NULL; it = it->next) {
- size += it->size - it->sparse;
-
- if (size >= threashold) {
- new = it->next;
- it->next = NULL;
- break;
- }
- }
-
- return new;
-}
-
-static uint64_t total_size(file_info_t *list)
-{
- uint64_t size = 0;
- file_info_t *it;
-
- for (it = list; it != NULL; it = it->next)
- size += it->size - it->sparse;
-
- return size;
-}
-
-void optimize_unpack_order(fstree_t *fs, size_t num_jobs,
- file_info_t *out[num_jobs])
+file_info_t *optimize_unpack_order(fstree_t *fs)
{
file_info_t *file_list;
- uint64_t threshold;
- size_t i;
-
- if (num_jobs < 1)
- return;
-
- for (i = 0; i < num_jobs; ++i)
- out[i] = NULL;
file_list = list_sort(fs, fs->files);
while (file_list != NULL && file_list->input_file == NULL)
file_list = file_list->next;
fs->files = NULL;
-
- if (num_jobs < 2) {
- out[0] = file_list;
- return;
- }
-
- threshold = total_size(file_list) / num_jobs;
-
- for (i = 0; i < (num_jobs - 1); ++i) {
- out[i] = file_list;
- file_list = split_list(file_list, threshold);
- }
-
- out[i] = file_list;
+ return file_list;
}
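The list_sort() context at the top of the hunk is the tail end of a textbook top-down merge sort on a singly linked list. A generic, self-contained sketch of that pattern, with a simplified node type and integer key rather than the project's actual comparison (which presumably uses the fs argument threaded through merge() and list_sort()):

    #include <stddef.h>

    struct node {
        struct node *next;
        unsigned long key;
    };

    /* Merge two already-sorted lists into one, ascending by key. */
    static struct node *merge(struct node *a, struct node *b)
    {
        struct node head = { NULL, 0 }, *tail = &head;

        while (a != NULL && b != NULL) {
            if (a->key <= b->key) {
                tail->next = a;
                a = a->next;
            } else {
                tail->next = b;
                b = b->next;
            }
            tail = tail->next;
        }
        tail->next = (a != NULL) ? a : b;
        return head.next;
    }

    /* Top-down merge sort: split at the middle using slow/fast
       pointers, sort both halves recursively, then merge them. */
    static struct node *list_sort(struct node *head)
    {
        struct node *slow, *fast, *half;

        if (head == NULL || head->next == NULL)
            return head;

        slow = head;
        fast = head->next;
        while (fast != NULL && fast->next != NULL) {
            slow = slow->next;
            fast = fast->next->next;
        }
        half = slow->next;
        slow->next = NULL;

        return merge(list_sort(head), list_sort(half));
    }

Merge sort is the natural choice here: it sorts a linked list in O(n log n) without the random access an array sort would need, which is why the surviving list_sort() keeps doing all the optimization work after the per-job splitting is removed.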