author    David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2019-09-17 14:29:29 +0200
committer David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2019-09-20 03:18:47 +0200
commit    9d7d0a84a2017af2e70cc0f33bfbce0b59470e62 (patch)
tree      f06ddabcebc1210d3764ada396284b46cebedc8d /include/fstree.h
parent    544f8f6dfd2f61fd1d2ab7a9a955e63d4b416dcc (diff)
Remove parallel unpacking
Parallel unpacking didn't really improve the speed that much. Actually sorting
the files for optimized unpack order improved speed much more than the parallel
unpacker. Furthermore, the fork based parallel unpacker was actually pretty
messy to begin with.

Signed-off-by: David Oberhollenzer <david.oberhollenzer@sigma-star.at>
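For context on the speed claim above: ordering files for unpacking means grouping files that share a fragment block and visiting data blocks in roughly ascending disk order, so each fragment block is decompressed only once and reads stay mostly sequential. The sketch below illustrates that idea only; unpack_file_t, its fields and compare_unpack_order are hypothetical stand-ins for illustration, not the actual file_info_t layout or the comparator used in squashfs-tools-ng.

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical per-file record for illustration; the real code keeps this
   kind of information in file_info_t with different field names. */
typedef struct {
        uint32_t fragment_block; /* fragment block holding the file's tail end */
        uint64_t data_offset;    /* on-disk start of the file's data blocks */
} unpack_file_t;

/* Sort so that users of the same fragment block end up adjacent (the block
   only has to be decompressed once) and data blocks are visited in
   ascending disk order for better locality. */
static int compare_unpack_order(const void *lhs, const void *rhs)
{
        const unpack_file_t *a = lhs, *b = rhs;

        if (a->fragment_block != b->fragment_block)
                return a->fragment_block < b->fragment_block ? -1 : 1;
        if (a->data_offset != b->data_offset)
                return a->data_offset < b->data_offset ? -1 : 1;
        return 0;
}

/* usage: qsort(files, count, sizeof(files[0]), compare_unpack_order); */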
Diffstat (limited to 'include/fstree.h')
-rw-r--r--  include/fstree.h  7
1 files changed, 2 insertions, 5 deletions
diff --git a/include/fstree.h b/include/fstree.h
index 880a870..05e1ebe 100644
--- a/include/fstree.h
+++ b/include/fstree.h
@@ -295,12 +295,9 @@ tree_node_t *fstree_node_from_path(fstree_t *fs, const char *path);
 /*
   Optimize the order of the fstree file list for unpacking as to avoid
   unpacking fragment blocks more than once and to improve locality when
-  fetching data from disk. The resulting list is returned in 'out'.
-  If num_jobs is > 1, the list is split up for parallel processing.
+  fetching data from disk.
 */
-void optimize_unpack_order(fstree_t *fs, size_t num_jobs,
-                           file_info_t *out[num_jobs]);
-
+file_info_t *optimize_unpack_order(fstree_t *fs);
 
 /*
   Convert back to forward slashed, remove all preceeding and trailing slashes,
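With the new prototype shown in the hunk above, a caller receives one ordered list instead of num_jobs per-worker lists. A rough sketch of what consuming it might look like, assuming file_info_t is a singly linked list chained through a next pointer and using a hypothetical extract_file() helper that stands in for the actual data copying:

#include "fstree.h"

/* Hypothetical stub; stands in for whatever actually writes the file data. */
static int extract_file(const file_info_t *fi)
{
        (void)fi;
        return 0;
}

static int unpack_all(fstree_t *fs)
{
        /* Single optimized list; no per-job splitting anymore. */
        file_info_t *fi = optimize_unpack_order(fs);

        /* fi->next: assumed linked-list chaining on file_info_t. */
        for (; fi != NULL; fi = fi->next) {
                if (extract_file(fi))
                        return -1; /* abort on the first failure */
        }

        return 0;
}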