From 9d7d0a84a2017af2e70cc0f33bfbce0b59470e62 Mon Sep 17 00:00:00 2001
From: David Oberhollenzer
Date: Tue, 17 Sep 2019 14:29:29 +0200
Subject: Remove parallel unpacking

Parallel unpacking didn't really improve speed that much. Sorting the
files into an optimized unpack order actually improved speed much more
than the parallel unpacker did. Furthermore, the fork-based parallel
unpacker was pretty messy to begin with.

Signed-off-by: David Oberhollenzer
---
 include/fstree.h | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/include/fstree.h b/include/fstree.h
index 880a870..05e1ebe 100644
--- a/include/fstree.h
+++ b/include/fstree.h
@@ -295,12 +295,9 @@ tree_node_t *fstree_node_from_path(fstree_t *fs, const char *path);
 
 /* Optimize the order of the fstree file list for unpacking as to avoid
    unpacking fragment blocks more than once and to improve locality when
-   fetching data from disk. The resulting list is returned in 'out'.
-   If num_jobs is > 1, the list is split up for parallel processing.
+   fetching data from disk.
 */
-void optimize_unpack_order(fstree_t *fs, size_t num_jobs,
-			   file_info_t *out[num_jobs]);
-
+file_info_t *optimize_unpack_order(fstree_t *fs);
 
 /* Convert back to forward slashed, remove all preceeding and trailing
    slashes,
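
For reference, here is a minimal caller sketch against the new single-list
API. It assumes file_info_t chains files through a 'next' pointer and uses
a hypothetical unpack_file() helper; neither detail is taken from this
patch, so treat it as an illustration rather than the tool's actual code.

#include "fstree.h"

/* hypothetical per-file worker, not part of fstree.h */
int unpack_file(const file_info_t *fi);

static int unpack_all(fstree_t *fs)
{
	file_info_t *it;

	/* The optimizer now hands back one reordered list instead of
	   filling per-job output arrays for forked workers. */
	for (it = optimize_unpack_order(fs); it != NULL; it = it->next) {
		if (unpack_file(it))
			return -1;
	}

	return 0;
}

With the fork-based workers gone, a single sequential walk over the
optimized list is all a caller needs; per the commit message, the speed
win comes from the ordering itself, not from parallelism.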