author     David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2019-08-04 16:56:08 +0200
committer  David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2019-08-04 17:01:14 +0200
commit     3a340b12eb9b7ed86a47391345cb836fa662b2d9 (patch)
tree       cf645d3ccef3347f10985f415a0755a5b0de36b9 /unpack
parent     bf1dd4f1ab8ef70f96704c4e2bd95968e1615b37 (diff)
Improve file unpacking order
This commit moves the file unpacking order & job scheduling into a libfstree
function. The ordering is improved by making sure fragment blocks are not
extracted more than once and that files with data blocks are extracted in
order.

This way, serial unpacking of a 2GiB Debian live image could be reduced from
~5 minutes on my test machine to ~3.5 minutes, whereas parallel unpacking
stays roughly the same (~3 minutes for -j 4).

Signed-off-by: David Oberhollenzer <david.oberhollenzer@sigma-star.at>
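[Editor's note] The reordering itself lives in a new libfstree helper, optimize_unpack_order(), which the diff below calls but does not define. As a rough sketch of the kind of partitioning the commit message describes, and not the actual implementation, the example below greedily splits a file list into num_jobs sublists by payload size while keeping files that share a fragment block on the same sublist, so each fragment block is only read once per worker. The simplified file_info_t layout, its fragment sentinel, and the helper name split_for_unpack are assumptions made for this illustration; the input list is assumed to already be ordered by on-disk location.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical, simplified stand-ins for the fstree types; not the real ones. */
typedef struct file_info_t {
	struct file_info_t *next;
	char *input_file;
	uint64_t size;
	uint64_t sparse;
	uint32_t fragment;	/* shared fragment block index, 0xFFFFFFFF if none */
} file_info_t;

/*
 * Greedy partitioning sketch: append each file to the sublist with the
 * smallest accumulated payload, except that consecutive files sharing a
 * fragment block stay on the same sublist.  Assumes num_jobs >= 1 and
 * that the list is already sorted by on-disk location.
 */
static void split_for_unpack(file_info_t *list, unsigned int num_jobs,
			     file_info_t **sublists)
{
	file_info_t **tail[num_jobs];
	uint64_t weight[num_jobs];
	uint32_t last_frag = 0xFFFFFFFF;
	unsigned int i, target = 0;
	file_info_t *it, *next;

	for (i = 0; i < num_jobs; ++i) {
		sublists[i] = NULL;
		tail[i] = &sublists[i];
		weight[i] = 0;
	}

	for (it = list; it != NULL; it = next) {
		next = it->next;

		/* pick a new target unless this file continues a fragment run */
		if (it->fragment == 0xFFFFFFFF || it->fragment != last_frag) {
			target = 0;
			for (i = 1; i < num_jobs; ++i) {
				if (weight[i] < weight[target])
					target = i;
			}
		}

		last_frag = it->fragment;
		weight[target] += it->size - it->sparse;

		it->next = NULL;
		*tail[target] = it;
		tail[target] = &it->next;
	}
}

Each worker then walks its own sublist front to back, so the relative extraction order of data-block files is preserved within a job.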
Diffstat (limited to 'unpack')
-rw-r--r--  unpack/fill_files.c  |  78
1 file changed, 21 insertions(+), 57 deletions(-)
diff --git a/unpack/fill_files.c b/unpack/fill_files.c
index e104ae7..c0fc26c 100644
--- a/unpack/fill_files.c
+++ b/unpack/fill_files.c
@@ -38,64 +38,23 @@ static int fill_files(data_reader_t *data, file_info_t *list, int flags)
 	return 0;
 }
-static file_info_t *split_list(file_info_t *list, uint64_t threashold)
-{
-	file_info_t *it, *new = NULL;
-	uint64_t size = 0;
-
-	for (it = list; it != NULL; it = it->next) {
-		if (it->input_file == NULL)
-			continue;
-
-		size += it->size - it->sparse;
-
-		if (size >= threashold) {
-			new = it->next;
-			it->next = NULL;
-			break;
-		}
-	}
-
-	return new;
-}
-
-static uint64_t total_size(file_info_t *list)
-{
-	uint64_t size = 0;
-	file_info_t *it;
-
-	for (it = list; it != NULL; it = it->next) {
-		if (it->input_file != NULL)
-			size += it->size - it->sparse;
-	}
-
-	return size;
-}
-
 int fill_unpacked_files(fstree_t *fs, data_reader_t *data, int flags,
 			unsigned int num_jobs)
 {
-	file_info_t *sublists[num_jobs], *it;
+	file_info_t **sublists, *it;
 	int exitstatus, status = 0;
-	uint64_t threshold;
 	unsigned int i;
 	pid_t pid;
-	if (num_jobs <= 1) {
-		status = fill_files(data, fs->files, flags);
+	if (num_jobs < 1)
+		num_jobs = 1;
-		for (it = fs->files; it != NULL; it = it->next)
-			free(it->input_file);
-
-		return status;
-	}
-
-	threshold = total_size(fs->files) / num_jobs;
+	sublists = alloca(sizeof(sublists[0]) * num_jobs);
+	optimize_unpack_order(fs, num_jobs, sublists);
-	for (i = 0; i < num_jobs; ++i) {
-		sublists[i] = fs->files;
-
-		fs->files = split_list(fs->files, threshold);
+	if (num_jobs < 2) {
+		status = fill_files(data, sublists[0], flags);
+		goto out;
 	}
 	for (i = 0; i < num_jobs; ++i) {
@@ -113,26 +72,31 @@ int fill_unpacked_files(fstree_t *fs, data_reader_t *data, int flags,
 		if (pid < 0) {
 			perror("fork");
 			status = -1;
-			num_jobs = i;
 			break;
 		}
 	}
-	for (i = 0; i < num_jobs; ++i) {
-		do {
-			pid = waitpid(-1, &exitstatus, 0);
+	for (;;) {
+		errno = 0;
+		pid = waitpid(-1, &exitstatus, 0);
+
+		if (pid < 0) {
+			if (errno == EINTR)
+				continue;
 			if (errno == ECHILD)
-				goto out;
-		} while (pid < 0);
+				break;
+		}
 		if (!WIFEXITED(exitstatus) ||
 		    WEXITSTATUS(exitstatus) != EXIT_SUCCESS) {
 			status = -1;
 		}
-
+	}
+out:
+	for (i = 0; i < num_jobs; ++i) {
 		for (it = sublists[i]; it != NULL; it = it->next)
 			free(it->input_file);
 	}
-out:
+
 	return status;
 }
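[Editor's note] The reaping loop introduced by the second hunk clears errno before each waitpid() call, retries on EINTR, and stops on ECHILD once no children remain. A self-contained sketch of the same pattern, with an extra catch-all error branch that the patch itself does not need, could look like the program below; it is only an illustration, not part of the change:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int exitstatus = 0, status = 0, i;
	pid_t pid;

	/* spawn a few dummy workers; one of them reports failure */
	for (i = 0; i < 4; ++i) {
		pid = fork();
		if (pid == 0)
			_exit(i == 2 ? EXIT_FAILURE : EXIT_SUCCESS);
		if (pid < 0) {
			perror("fork");
			status = -1;
			break;
		}
	}

	/* reap everything: retry on EINTR, stop once no children remain */
	for (;;) {
		errno = 0;
		pid = waitpid(-1, &exitstatus, 0);

		if (pid < 0) {
			if (errno == EINTR)
				continue;
			if (errno == ECHILD)
				break;
			perror("waitpid");
			status = -1;
			break;
		}

		if (!WIFEXITED(exitstatus) ||
		    WEXITSTATUS(exitstatus) != EXIT_SUCCESS) {
			status = -1;
		}
	}

	return status == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}

Clearing errno before the call is defensive: it guarantees the EINTR/ECHILD checks only ever see a value set by that particular waitpid() call.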