author     David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2020-04-27 11:59:02 +0200
committer  David Oberhollenzer <david.oberhollenzer@sigma-star.at>  2020-04-27 11:59:02 +0200
commit     20b0d509f67dea802706cd6b80b5e20d14988931 (patch)
tree       3a87ea358b1206f6823777693d109896d6908283 /tar
parent     9e332a2d3eddcc262476ac263e03df021b3c44b4 (diff)
Cleanup directory structure of the binary programs
Instead of having the binary programs in randomly named subdirectories, move all of them to a "bin" subdirectory, similar to the utility libraries that have subdirectories within "lib", and give the subdirectories proper names (e.g. have the gensquashfs source in a directory *actually* named "gensquashfs").

Signed-off-by: David Oberhollenzer <david.oberhollenzer@sigma-star.at>
Diffstat (limited to 'tar')
-rw-r--r--  tar/Makemodule.am    12
-rw-r--r--  tar/sqfs2tar.c      688
-rw-r--r--  tar/tar2sqfs.c      544
3 files changed, 0 insertions, 1244 deletions
diff --git a/tar/Makemodule.am b/tar/Makemodule.am
deleted file mode 100644
index e065ef7..0000000
--- a/tar/Makemodule.am
+++ /dev/null
@@ -1,12 +0,0 @@
-sqfs2tar_SOURCES = tar/sqfs2tar.c
-sqfs2tar_CFLAGS = $(AM_CFLAGS) $(PTHREAD_CFLAGS)
-sqfs2tar_LDADD = libcommon.a libutil.a libsquashfs.la libtar.a libcompat.a
-sqfs2tar_LDADD += libfstree.a $(LZO_LIBS) $(PTHREAD_LIBS)
-
-tar2sqfs_SOURCES = tar/tar2sqfs.c
-tar2sqfs_CFLAGS = $(AM_CFLAGS) $(PTHREAD_CFLAGS)
-tar2sqfs_LDADD = libcommon.a libsquashfs.la libtar.a
-tar2sqfs_LDADD += libfstree.a libcompat.a libfstree.a $(LZO_LIBS)
-tar2sqfs_LDADD += $(PTHREAD_LIBS)
-
-bin_PROGRAMS += sqfs2tar tar2sqfs
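This diff is limited to 'tar', so the new home of these build rules is not shown here. Going by the commit message, they presumably reappear under per-program directories inside "bin"; a minimal sketch of the relocated sqfs2tar rules, assuming the new path is bin/sqfs2tar/Makemodule.am (the exact path and layout are an assumption, not taken from this diff):

	# assumed new location: bin/sqfs2tar/Makemodule.am
	sqfs2tar_SOURCES = bin/sqfs2tar/sqfs2tar.c
	sqfs2tar_CFLAGS = $(AM_CFLAGS) $(PTHREAD_CFLAGS)
	sqfs2tar_LDADD = libcommon.a libutil.a libsquashfs.la libtar.a libcompat.a
	sqfs2tar_LDADD += libfstree.a $(LZO_LIBS) $(PTHREAD_LIBS)
	bin_PROGRAMS += sqfs2tar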
diff --git a/tar/sqfs2tar.c b/tar/sqfs2tar.c
deleted file mode 100644
index 6d2a51a..0000000
--- a/tar/sqfs2tar.c
+++ /dev/null
@@ -1,688 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-/*
- * sqfs2tar.c
- *
- * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at>
- */
-#include "config.h"
-#include "common.h"
-#include "tar.h"
-
-#include <getopt.h>
-#include <string.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <stdio.h>
-
-static struct option long_opts[] = {
- { "subdir", required_argument, NULL, 'd' },
- { "keep-as-dir", no_argument, NULL, 'k' },
- { "root-becomes", required_argument, NULL, 'r' },
- { "no-skip", no_argument, NULL, 's' },
- { "no-xattr", no_argument, NULL, 'X' },
- { "no-hard-links", no_argument, NULL, 'L' },
- { "help", no_argument, NULL, 'h' },
- { "version", no_argument, NULL, 'V' },
- { NULL, 0, NULL, 0 },
-};
-
-static const char *short_opts = "d:kr:sXLhV";
-
-static const char *usagestr =
-"Usage: sqfs2tar [OPTIONS...] <sqfsfile>\n"
-"\n"
-"Read an input squashfs archive and turn it into a tar archive, written\n"
-"to stdout.\n"
-"\n"
-"Possible options:\n"
-"\n"
-" --subdir, -d <dir> Unpack the given sub directory instead of the\n"
-" filesystem root. Can be specified more than\n"
-" once to select multiple directories. If only\n"
-" one is specified, it becomes the new root of\n"
-" node of the archive file system tree.\n"
-"\n"
-" --root-becomes, -r <dir> Turn the root inode into a directory with the\n"
-" specified name. Everything else will be stored\n"
-" inside this directory. The special value '.' is\n"
-" allowed to prefix all tar paths with './' and\n"
-" add an entry named '.' for the root inode.\n"
-" If this option isn't used, all meta data stored\n"
-" in the root inode IS LOST!\n"
-"\n"
-" --keep-as-dir, -k If --subdir is used only once, don't make the\n"
-" subdir the archive root, instead keep it as\n"
-" prefix for all unpacked files.\n"
-" Using --subdir more than once implies\n"
-" --keep-as-dir.\n"
-" --no-xattr, -X Do not copy extended attributes.\n"
-" --no-hard-links, -L Do not generate hard links. Produce duplicate\n"
-" entries instead.\n"
-"\n"
-" --no-skip, -s Abort if a file cannot be stored in a tar\n"
-" archive. By default, it is simply skipped\n"
-" and a warning is written to stderr.\n"
-"\n"
-" --help, -h Print help text and exit.\n"
-" --version, -V Print version information and exit.\n"
-"\n"
-"Examples:\n"
-"\n"
-"\tsqfs2tar rootfs.sqfs > rootfs.tar\n"
-"\tsqfs2tar rootfs.sqfs | gzip > rootfs.tar.gz\n"
-"\tsqfs2tar rootfs.sqfs | xz > rootfs.tar.xz\n"
-"\n";
-
-static const char *filename;
-static unsigned int record_counter;
-static bool dont_skip = false;
-static bool keep_as_dir = false;
-static bool no_xattr = false;
-static bool no_links = false;
-
-static char *root_becomes = NULL;
-static char **subdirs = NULL;
-static size_t num_subdirs = 0;
-static size_t max_subdirs = 0;
-
-static sqfs_xattr_reader_t *xr;
-static sqfs_data_reader_t *data;
-static sqfs_file_t *file;
-static sqfs_super_t super;
-static sqfs_hard_link_t *links = NULL;
-
-static FILE *out_file = NULL;
-
-static void process_args(int argc, char **argv)
-{
- size_t idx, new_count;
- int i, ret;
- void *new;
-
- for (;;) {
- i = getopt_long(argc, argv, short_opts, long_opts, NULL);
- if (i == -1)
- break;
-
- switch (i) {
- case 'd':
- if (num_subdirs == max_subdirs) {
- new_count = max_subdirs ? max_subdirs * 2 : 16;
- new = realloc(subdirs,
- new_count * sizeof(subdirs[0]));
- if (new == NULL)
- goto fail_errno;
-
- max_subdirs = new_count;
- subdirs = new;
- }
-
- subdirs[num_subdirs] = strdup(optarg);
- if (subdirs[num_subdirs] == NULL)
- goto fail_errno;
-
- if (canonicalize_name(subdirs[num_subdirs])) {
- perror(optarg);
- goto fail;
- }
-
- ++num_subdirs;
- break;
- case 'r':
- free(root_becomes);
- root_becomes = strdup(optarg);
- if (root_becomes == NULL)
- goto fail_errno;
-
- if (strcmp(root_becomes, "./") == 0)
- root_becomes[1] = '\0';
-
- if (strcmp(root_becomes, ".") == 0)
- break;
-
- if (canonicalize_name(root_becomes) != 0 ||
- strlen(root_becomes) == 0) {
- fprintf(stderr,
- "Invalid root directory '%s'.\n",
- optarg);
- goto fail_arg;
- }
- break;
- case 'k':
- keep_as_dir = true;
- break;
- case 's':
- dont_skip = true;
- break;
- case 'X':
- no_xattr = true;
- break;
- case 'L':
- no_links = true;
- break;
- case 'h':
- fputs(usagestr, stdout);
- goto out_success;
- case 'V':
- print_version("sqfs2tar");
- goto out_success;
- default:
- goto fail_arg;
- }
- }
-
- if (optind >= argc) {
- fputs("Missing argument: squashfs image\n", stderr);
- goto fail_arg;
- }
-
- filename = argv[optind++];
-
- if (optind < argc) {
- fputs("Unknown extra arguments\n", stderr);
- goto fail_arg;
- }
-
- if (num_subdirs > 1)
- keep_as_dir = true;
-
- return;
-fail_errno:
- perror("parsing options");
- goto fail;
-fail_arg:
- fputs("Try `sqfs2tar --help' for more information.\n", stderr);
- goto fail;
-fail:
- ret = EXIT_FAILURE;
- goto out_exit;
-out_success:
- ret = EXIT_SUCCESS;
- goto out_exit;
-out_exit:
- for (idx = 0; idx < num_subdirs; ++idx)
- free(subdirs[idx]);
- free(root_becomes);
- free(subdirs);
- exit(ret);
-}
-
-static int terminate_archive(void)
-{
- char buffer[1024];
-
- memset(buffer, '\0', sizeof(buffer));
-
- return write_retry("adding archive terminator", out_file,
- buffer, sizeof(buffer));
-}
-
-static int get_xattrs(const char *name, const sqfs_inode_generic_t *inode,
- tar_xattr_t **out)
-{
- tar_xattr_t *list = NULL, *ent;
- sqfs_xattr_value_t *value;
- sqfs_xattr_entry_t *key;
- sqfs_xattr_id_t desc;
- sqfs_u32 index;
- size_t i;
- int ret;
-
- if (xr == NULL)
- return 0;
-
- sqfs_inode_get_xattr_index(inode, &index);
-
- if (index == 0xFFFFFFFF)
- return 0;
-
- ret = sqfs_xattr_reader_get_desc(xr, index, &desc);
- if (ret) {
- sqfs_perror(name, "resolving xattr index", ret);
- return -1;
- }
-
- ret = sqfs_xattr_reader_seek_kv(xr, &desc);
- if (ret) {
- sqfs_perror(name, "locating xattr key-value pairs", ret);
- return -1;
- }
-
- for (i = 0; i < desc.count; ++i) {
- ret = sqfs_xattr_reader_read_key(xr, &key);
- if (ret) {
- sqfs_perror(name, "reading xattr key", ret);
- goto fail;
- }
-
- ret = sqfs_xattr_reader_read_value(xr, key, &value);
- if (ret) {
- sqfs_perror(name, "reading xattr value", ret);
- free(key);
- goto fail;
- }
-
- ent = calloc(1, sizeof(*ent) + strlen((const char *)key->key) +
- value->size + 2);
- if (ent == NULL) {
- perror("creating xattr entry");
- free(key);
- free(value);
- goto fail;
- }
-
- ent->key = ent->data;
- strcpy(ent->key, (const char *)key->key);
-
- ent->value = (sqfs_u8 *)ent->key + strlen(ent->key) + 1;
- memcpy(ent->value, value->value, value->size + 1);
-
- ent->value_len = value->size;
- ent->next = list;
- list = ent;
-
- free(key);
- free(value);
- }
-
- *out = list;
- return 0;
-fail:
- while (list != NULL) {
- ent = list;
- list = list->next;
- free(ent);
- }
- return -1;
-}
-
-static char *assemble_tar_path(char *name, bool is_dir)
-{
- size_t len, new_len;
- char *temp;
- (void)is_dir;
-
- if (root_becomes == NULL && !is_dir)
- return name;
-
- new_len = strlen(name);
- if (root_becomes != NULL)
- new_len += strlen(root_becomes) + 1;
- if (is_dir)
- new_len += 1;
-
- temp = realloc(name, new_len + 1);
- if (temp == NULL) {
- perror("assembling tar entry filename");
- free(name);
- return NULL;
- }
-
- name = temp;
-
- if (root_becomes != NULL) {
- len = strlen(root_becomes);
-
- memmove(name + len + 1, name, strlen(name) + 1);
- memcpy(name, root_becomes, len);
- name[len] = '/';
- }
-
- if (is_dir) {
- len = strlen(name);
-
- if (len == 0 || name[len - 1] != '/') {
- name[len++] = '/';
- name[len] = '\0';
- }
- }
-
- return name;
-}
-
-static int write_tree_dfs(const sqfs_tree_node_t *n)
-{
- tar_xattr_t *xattr = NULL, *xit;
- sqfs_hard_link_t *lnk = NULL;
- char *name, *target;
- struct stat sb;
- size_t len;
- int ret;
-
- inode_stat(n, &sb);
-
- if (n->parent == NULL) {
- if (root_becomes == NULL)
- goto skip_hdr;
-
- len = strlen(root_becomes);
- name = malloc(len + 2);
- if (name == NULL) {
- perror("creating root directory");
- return -1;
- }
-
- memcpy(name, root_becomes, len);
- name[len] = '/';
- name[len + 1] = '\0';
- } else {
- if (!is_filename_sane((const char *)n->name, false)) {
- fprintf(stderr, "Found a file named '%s', skipping.\n",
- n->name);
- if (dont_skip) {
- fputs("Not allowed to skip files, aborting!\n",
- stderr);
- return -1;
- }
- return 0;
- }
-
- name = sqfs_tree_node_get_path(n);
- if (name == NULL) {
- perror("resolving tree node path");
- return -1;
- }
-
- if (canonicalize_name(name))
- goto out_skip;
-
- for (lnk = links; lnk != NULL; lnk = lnk->next) {
- if (lnk->inode_number == n->inode->base.inode_number) {
- if (strcmp(name, lnk->target) == 0)
- lnk = NULL;
- break;
- }
- }
-
- name = assemble_tar_path(name, S_ISDIR(sb.st_mode));
- if (name == NULL)
- return -1;
- }
-
- if (lnk != NULL) {
- ret = write_hard_link(out_file, &sb, name, lnk->target,
- record_counter++);
- free(name);
- return ret;
- }
-
- if (!no_xattr) {
- if (get_xattrs(name, n->inode, &xattr)) {
- free(name);
- return -1;
- }
- }
-
- target = S_ISLNK(sb.st_mode) ? (char *)n->inode->extra : NULL;
- ret = write_tar_header(out_file, &sb, name, target, xattr,
- record_counter++);
-
- while (xattr != NULL) {
- xit = xattr;
- xattr = xattr->next;
- free(xit);
- }
-
- if (ret > 0)
- goto out_skip;
-
- if (ret < 0) {
- free(name);
- return -1;
- }
-
- if (S_ISREG(sb.st_mode)) {
- if (sqfs_data_reader_dump(name, data, n->inode, out_file,
- super.block_size, false)) {
- free(name);
- return -1;
- }
-
- if (padd_file(out_file, sb.st_size)) {
- free(name);
- return -1;
- }
- }
-
- free(name);
-skip_hdr:
- for (n = n->children; n != NULL; n = n->next) {
- if (write_tree_dfs(n))
- return -1;
- }
- return 0;
-out_skip:
- if (dont_skip) {
- fputs("Not allowed to skip files, aborting!\n", stderr);
- ret = -1;
- } else {
- fprintf(stderr, "Skipping %s\n", name);
- ret = 0;
- }
- free(name);
- return ret;
-}
-
-static sqfs_tree_node_t *tree_merge(sqfs_tree_node_t *lhs,
- sqfs_tree_node_t *rhs)
-{
- sqfs_tree_node_t *head = NULL, **next_ptr = &head;
- sqfs_tree_node_t *it, *l, *r;
- int diff;
-
- while (lhs->children != NULL && rhs->children != NULL) {
- diff = strcmp((const char *)lhs->children->name,
- (const char *)rhs->children->name);
-
- if (diff < 0) {
- it = lhs->children;
- lhs->children = lhs->children->next;
- } else if (diff > 0) {
- it = rhs->children;
- rhs->children = rhs->children->next;
- } else {
- l = lhs->children;
- lhs->children = lhs->children->next;
-
- r = rhs->children;
- rhs->children = rhs->children->next;
-
- it = tree_merge(l, r);
- }
-
- *next_ptr = it;
- next_ptr = &it->next;
- }
-
- it = (lhs->children != NULL ? lhs->children : rhs->children);
- *next_ptr = it;
-
- sqfs_dir_tree_destroy(rhs);
- lhs->children = head;
- return lhs;
-}
-
-int main(int argc, char **argv)
-{
- sqfs_tree_node_t *root = NULL, *subtree;
- int flags, ret, status = EXIT_FAILURE;
- sqfs_compressor_config_t cfg;
- sqfs_compressor_t *cmp;
- sqfs_id_table_t *idtbl;
- sqfs_dir_reader_t *dr;
- sqfs_hard_link_t *lnk;
- size_t i;
-
- process_args(argc, argv);
-
-#ifdef _WIN32
- _setmode(_fileno(stdout), _O_BINARY);
- out_file = stdout;
-#else
- out_file = freopen(NULL, "wb", stdout);
-#endif
-
- if (out_file == NULL) {
- perror("changing stdout to binary mode");
- goto out_dirs;
- }
-
- file = sqfs_open_file(filename, SQFS_FILE_OPEN_READ_ONLY);
- if (file == NULL) {
- perror(filename);
- goto out_dirs;
- }
-
- ret = sqfs_super_read(&super, file);
- if (ret) {
- sqfs_perror(filename, "reading super block", ret);
- goto out_fd;
- }
-
- sqfs_compressor_config_init(&cfg, super.compression_id,
- super.block_size,
- SQFS_COMP_FLAG_UNCOMPRESS);
-
- ret = sqfs_compressor_create(&cfg, &cmp);
-
-#ifdef WITH_LZO
- if (super.compression_id == SQFS_COMP_LZO && ret != 0)
- ret = lzo_compressor_create(&cfg, &cmp);
-#endif
-
- if (ret != 0) {
- sqfs_perror(filename, "creating compressor", ret);
- goto out_fd;
- }
-
- idtbl = sqfs_id_table_create(0);
-
- if (idtbl == NULL) {
- perror("creating ID table");
- goto out_cmp;
- }
-
- ret = sqfs_id_table_read(idtbl, file, &super, cmp);
- if (ret) {
- sqfs_perror(filename, "loading ID table", ret);
- goto out_id;
- }
-
- data = sqfs_data_reader_create(file, super.block_size, cmp);
- if (data == NULL) {
- sqfs_perror(filename, "creating data reader",
- SQFS_ERROR_ALLOC);
- goto out_id;
- }
-
- ret = sqfs_data_reader_load_fragment_table(data, &super);
- if (ret) {
- sqfs_perror(filename, "loading fragment table", ret);
- goto out_data;
- }
-
- dr = sqfs_dir_reader_create(&super, cmp, file);
- if (dr == NULL) {
- sqfs_perror(filename, "creating dir reader",
- SQFS_ERROR_ALLOC);
- goto out_data;
- }
-
- if (!no_xattr && !(super.flags & SQFS_FLAG_NO_XATTRS)) {
- xr = sqfs_xattr_reader_create(0);
- if (xr == NULL) {
- sqfs_perror(filename, "creating xattr reader",
- SQFS_ERROR_ALLOC);
- goto out_dr;
- }
-
- ret = sqfs_xattr_reader_load(xr, &super, file, cmp);
- if (ret) {
- sqfs_perror(filename, "loading xattr table", ret);
- goto out_xr;
- }
- }
-
- if (num_subdirs == 0) {
- ret = sqfs_dir_reader_get_full_hierarchy(dr, idtbl, NULL,
- 0, &root);
- if (ret) {
- sqfs_perror(filename, "loading filesystem tree", ret);
- goto out;
- }
- } else {
- flags = 0;
-
- if (keep_as_dir || num_subdirs > 1)
- flags = SQFS_TREE_STORE_PARENTS;
-
- for (i = 0; i < num_subdirs; ++i) {
- ret = sqfs_dir_reader_get_full_hierarchy(dr, idtbl,
- subdirs[i],
- flags,
- &subtree);
- if (ret) {
- sqfs_perror(subdirs[i], "loading filesystem "
- "tree", ret);
- goto out;
- }
-
- if (root == NULL) {
- root = subtree;
- } else {
- root = tree_merge(root, subtree);
- }
- }
- }
-
- if (!no_links) {
- if (sqfs_tree_find_hard_links(root, &links))
- goto out_tree;
-
- for (lnk = links; lnk != NULL; lnk = lnk->next) {
- lnk->target = assemble_tar_path(lnk->target, false);
- if (lnk->target == NULL)
- goto out;
- }
- }
-
- if (write_tree_dfs(root))
- goto out;
-
- if (terminate_archive())
- goto out;
-
- status = EXIT_SUCCESS;
- fflush(out_file);
-out:
- while (links != NULL) {
- lnk = links;
- links = links->next;
- free(lnk->target);
- free(lnk);
- }
-out_tree:
- if (root != NULL)
- sqfs_dir_tree_destroy(root);
-out_xr:
- if (xr != NULL)
- sqfs_destroy(xr);
-out_dr:
- sqfs_destroy(dr);
-out_data:
- sqfs_destroy(data);
-out_id:
- sqfs_destroy(idtbl);
-out_cmp:
- sqfs_destroy(cmp);
-out_fd:
- sqfs_destroy(file);
-out_dirs:
- for (i = 0; i < num_subdirs; ++i)
- free(subdirs[i]);
- free(subdirs);
- free(root_becomes);
- return status;
-}
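The deleted help text above spells out how --subdir, --keep-as-dir and --root-becomes interact. In the spirit of the examples embedded in the usage string, an illustrative (hypothetical) invocation that extracts two subtrees and re-roots them under a prefix could look like this; the file and directory names are invented for illustration:

	sqfs2tar --subdir etc --subdir home --root-becomes backup rootfs.sqfs | gzip > backup.tar.gz

Since --subdir is given more than once, --keep-as-dir is implied, so the etc/ and home/ trees keep their path prefixes in the resulting archive, all placed under the backup/ directory.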
diff --git a/tar/tar2sqfs.c b/tar/tar2sqfs.c
deleted file mode 100644
index 6025dc9..0000000
--- a/tar/tar2sqfs.c
+++ /dev/null
@@ -1,544 +0,0 @@
-/* SPDX-License-Identifier: GPL-3.0-or-later */
-/*
- * tar2sqfs.c
- *
- * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at>
- */
-#include "config.h"
-#include "common.h"
-#include "compat.h"
-#include "tar.h"
-
-#include <stdlib.h>
-#include <getopt.h>
-#include <string.h>
-#include <stdio.h>
-#include <fcntl.h>
-
-#ifdef _WIN32
-#include <io.h>
-#endif
-
-static struct option long_opts[] = {
- { "root-becomes", required_argument, NULL, 'r' },
- { "compressor", required_argument, NULL, 'c' },
- { "block-size", required_argument, NULL, 'b' },
- { "dev-block-size", required_argument, NULL, 'B' },
- { "defaults", required_argument, NULL, 'd' },
- { "num-jobs", required_argument, NULL, 'j' },
- { "queue-backlog", required_argument, NULL, 'Q' },
- { "comp-extra", required_argument, NULL, 'X' },
- { "no-skip", no_argument, NULL, 's' },
- { "no-xattr", no_argument, NULL, 'x' },
- { "no-keep-time", no_argument, NULL, 'k' },
- { "exportable", no_argument, NULL, 'e' },
- { "no-tail-packing", no_argument, NULL, 'T' },
- { "force", no_argument, NULL, 'f' },
- { "quiet", no_argument, NULL, 'q' },
- { "help", no_argument, NULL, 'h' },
- { "version", no_argument, NULL, 'V' },
- { NULL, 0, NULL, 0 },
-};
-
-static const char *short_opts = "r:c:b:B:d:X:j:Q:sxekfqThV";
-
-static const char *usagestr =
-"Usage: tar2sqfs [OPTIONS...] <sqfsfile>\n"
-"\n"
-"Read an uncompressed tar archive from stdin and turn it into a squashfs\n"
-"filesystem image.\n"
-"\n"
-"Possible options:\n"
-"\n"
-" --root-becomes, -r <dir> The specified directory becomes the root.\n"
-" Only its children are packed into the image\n"
-" and its attributes (ownership, permissions,\n"
-" xattrs, ...) are stored in the root inode.\n"
-" If not set and a tarbal has an entry for './'\n"
-" or '/', it becomes the root instead.\n"
-"\n"
-" --compressor, -c <name> Select the compressor to use.\n"
-" A list of available compressors is below.\n"
-" --comp-extra, -X <options> A comma separated list of extra options for\n"
-" the selected compressor. Specify 'help' to\n"
-" get a list of available options.\n"
-" --num-jobs, -j <count> Number of compressor jobs to create.\n"
-" --queue-backlog, -Q <count> Maximum number of data blocks in the thread\n"
-" worker queue before the packer starts waiting\n"
-" for the block processors to catch up.\n"
-" Defaults to 10 times the number of jobs.\n"
-" --block-size, -b <size> Block size to use for Squashfs image.\n"
-" Defaults to %u.\n"
-" --dev-block-size, -B <size> Device block size to padd the image to.\n"
-" Defaults to %u.\n"
-" --defaults, -d <options> A comma separated list of default values for\n"
-" implicitly created directories.\n"
-"\n"
-" Possible options:\n"
-" uid=<value> 0 if not set.\n"
-" gid=<value> 0 if not set.\n"
-" mode=<value> 0755 if not set.\n"
-" mtime=<value> 0 if not set.\n"
-"\n"
-" --no-skip, -s Abort if a tar record cannot be read instead\n"
-" of skipping it.\n"
-" --no-xattr, -x Do not copy extended attributes from archive.\n"
-" --no-keep-time, -k Do not keep the time stamps stored in the\n"
-" archive. Instead, set defaults on all files.\n"
-" --exportable, -e Generate an export table for NFS support.\n"
-" --no-tail-packing, -T Do not perform tail end packing on files that\n"
-" are larger than block size.\n"
-" --force, -f Overwrite the output file if it exists.\n"
-" --quiet, -q Do not print out progress reports.\n"
-" --help, -h Print help text and exit.\n"
-" --version, -V Print version information and exit.\n"
-"\n"
-"Examples:\n"
-"\n"
-"\ttar2sqfs rootfs.sqfs < rootfs.tar\n"
-"\tzcat rootfs.tar.gz | tar2sqfs rootfs.sqfs\n"
-"\txzcat rootfs.tar.xz | tar2sqfs rootfs.sqfs\n"
-"\n";
-
-static bool dont_skip = false;
-static bool keep_time = true;
-static bool no_tail_pack = false;
-static sqfs_writer_cfg_t cfg;
-static sqfs_writer_t sqfs;
-static FILE *input_file = NULL;
-static char *root_becomes = NULL;
-
-static void process_args(int argc, char **argv)
-{
- bool have_compressor;
- int i, ret;
-
- sqfs_writer_cfg_init(&cfg);
-
- for (;;) {
- i = getopt_long(argc, argv, short_opts, long_opts, NULL);
- if (i == -1)
- break;
-
- switch (i) {
- case 'T':
- no_tail_pack = true;
- break;
- case 'b':
- if (parse_size("Block size", &cfg.block_size,
- optarg, 0)) {
- exit(EXIT_FAILURE);
- }
- break;
- case 'B':
- if (parse_size("Device block size", &cfg.devblksize,
- optarg, 0)) {
- exit(EXIT_FAILURE);
- }
- if (cfg.devblksize < 1024) {
- fputs("Device block size must be at "
- "least 1024\n", stderr);
- exit(EXIT_FAILURE);
- }
- break;
- case 'c':
- have_compressor = true;
- ret = sqfs_compressor_id_from_name(optarg);
-
- if (ret < 0) {
- have_compressor = false;
-#ifdef WITH_LZO
- if (cfg.comp_id == SQFS_COMP_LZO)
- have_compressor = true;
-#endif
- }
-
- if (!have_compressor) {
- fprintf(stderr, "Unsupported compressor '%s'\n",
- optarg);
- exit(EXIT_FAILURE);
- }
-
- cfg.comp_id = ret;
- break;
- case 'j':
- cfg.num_jobs = strtol(optarg, NULL, 0);
- break;
- case 'Q':
- cfg.max_backlog = strtol(optarg, NULL, 0);
- break;
- case 'X':
- cfg.comp_extra = optarg;
- break;
- case 'd':
- cfg.fs_defaults = optarg;
- break;
- case 'x':
- cfg.no_xattr = true;
- break;
- case 'k':
- keep_time = false;
- break;
- case 'r':
- free(root_becomes);
- root_becomes = strdup(optarg);
- if (root_becomes == NULL) {
- perror("copying root directory name");
- exit(EXIT_FAILURE);
- }
-
- if (canonicalize_name(root_becomes) != 0 ||
- strlen(root_becomes) == 0) {
- fprintf(stderr,
- "Invalid root directory '%s'.\n",
- optarg);
- goto fail_arg;
- }
- break;
- case 's':
- dont_skip = true;
- break;
- case 'e':
- cfg.exportable = true;
- break;
- case 'f':
- cfg.outmode |= SQFS_FILE_OPEN_OVERWRITE;
- break;
- case 'q':
- cfg.quiet = true;
- break;
- case 'h':
- printf(usagestr, SQFS_DEFAULT_BLOCK_SIZE,
- SQFS_DEVBLK_SIZE);
- compressor_print_available();
- exit(EXIT_SUCCESS);
- case 'V':
- print_version("tar2sqfs");
- exit(EXIT_SUCCESS);
- default:
- goto fail_arg;
- }
- }
-
- if (cfg.num_jobs < 1)
- cfg.num_jobs = 1;
-
- if (cfg.max_backlog < 1)
- cfg.max_backlog = 10 * cfg.num_jobs;
-
- if (cfg.comp_extra != NULL && strcmp(cfg.comp_extra, "help") == 0) {
- compressor_print_help(cfg.comp_id);
- exit(EXIT_SUCCESS);
- }
-
- if (optind >= argc) {
- fputs("Missing argument: squashfs image\n", stderr);
- goto fail_arg;
- }
-
- cfg.filename = argv[optind++];
-
- if (optind < argc) {
- fputs("Unknown extra arguments\n", stderr);
- goto fail_arg;
- }
- return;
-fail_arg:
- fputs("Try `tar2sqfs --help' for more information.\n", stderr);
- exit(EXIT_FAILURE);
-}
-
-static int write_file(tar_header_decoded_t *hdr, file_info_t *fi,
- sqfs_u64 filesize)
-{
- sqfs_file_t *file;
- int flags;
- int ret;
-
- file = sqfs_get_stdin_file(input_file, hdr->sparse, filesize);
- if (file == NULL) {
- perror("packing files");
- return -1;
- }
-
- flags = 0;
- if (no_tail_pack && filesize > cfg.block_size)
- flags |= SQFS_BLK_DONT_FRAGMENT;
-
- ret = write_data_from_file(hdr->name, sqfs.data,
- (sqfs_inode_generic_t **)&fi->user_ptr,
- file, flags);
- sqfs_destroy(file);
-
- if (ret)
- return -1;
-
- return skip_padding(input_file, hdr->sparse == NULL ?
- filesize : hdr->record_size);
-}
-
-static int copy_xattr(tree_node_t *node, const tar_header_decoded_t *hdr)
-{
- tar_xattr_t *xattr;
- int ret;
-
- ret = sqfs_xattr_writer_begin(sqfs.xwr);
- if (ret) {
- sqfs_perror(hdr->name, "beginning xattr block", ret);
- return -1;
- }
-
- for (xattr = hdr->xattr; xattr != NULL; xattr = xattr->next) {
- if (sqfs_get_xattr_prefix_id(xattr->key) < 0) {
- fprintf(stderr, "%s: squashfs does not "
- "support xattr prefix of %s\n",
- dont_skip ? "ERROR" : "WARNING",
- xattr->key);
-
- if (dont_skip)
- return -1;
- continue;
- }
-
- ret = sqfs_xattr_writer_add(sqfs.xwr, xattr->key, xattr->value,
- xattr->value_len);
- if (ret) {
- sqfs_perror(hdr->name, "storing xattr key-value pair",
- ret);
- return -1;
- }
- }
-
- ret = sqfs_xattr_writer_end(sqfs.xwr, &node->xattr_idx);
- if (ret) {
- sqfs_perror(hdr->name, "completing xattr block", ret);
- return -1;
- }
-
- return 0;
-}
-
-static int create_node_and_repack_data(tar_header_decoded_t *hdr)
-{
- tree_node_t *node;
-
- if (hdr->is_hard_link) {
- node = fstree_add_hard_link(&sqfs.fs, hdr->name,
- hdr->link_target);
- if (node == NULL)
- goto fail_errno;
-
- if (!cfg.quiet) {
- printf("Hard link %s -> %s\n", hdr->name,
- hdr->link_target);
- }
- return 0;
- }
-
- if (!keep_time) {
- hdr->sb.st_mtime = sqfs.fs.defaults.st_mtime;
- }
-
- node = fstree_add_generic(&sqfs.fs, hdr->name,
- &hdr->sb, hdr->link_target);
- if (node == NULL)
- goto fail_errno;
-
- if (!cfg.quiet)
- printf("Packing %s\n", hdr->name);
-
- if (!cfg.no_xattr) {
- if (copy_xattr(node, hdr))
- return -1;
- }
-
- if (S_ISREG(hdr->sb.st_mode)) {
- if (write_file(hdr, &node->data.file, hdr->sb.st_size))
- return -1;
- }
-
- return 0;
-fail_errno:
- perror(hdr->name);
- return -1;
-}
-
-static int set_root_attribs(const tar_header_decoded_t *hdr)
-{
- if (hdr->is_hard_link || !S_ISDIR(hdr->sb.st_mode)) {
- fprintf(stderr, "'%s' is not a directory!\n", hdr->name);
- return -1;
- }
-
- sqfs.fs.root->uid = hdr->sb.st_uid;
- sqfs.fs.root->gid = hdr->sb.st_gid;
- sqfs.fs.root->mode = hdr->sb.st_mode;
-
- if (keep_time)
- sqfs.fs.root->mod_time = hdr->sb.st_mtime;
-
- if (!cfg.no_xattr) {
- if (copy_xattr(sqfs.fs.root, hdr))
- return -1;
- }
-
- return 0;
-}
-
-static int process_tar_ball(void)
-{
- bool skip, is_root, is_prefixed;
- tar_header_decoded_t hdr;
- sqfs_u64 offset, count;
- sparse_map_t *m;
- size_t rootlen;
- int ret;
-
- rootlen = root_becomes == NULL ? 0 : strlen(root_becomes);
-
- for (;;) {
- ret = read_header(input_file, &hdr);
- if (ret > 0)
- break;
- if (ret < 0)
- return -1;
-
- if (hdr.mtime < 0)
- hdr.mtime = 0;
-
- if ((sqfs_u64)hdr.mtime > 0x0FFFFFFFFUL)
- hdr.mtime = 0x0FFFFFFFFUL;
-
- hdr.sb.st_mtime = hdr.mtime;
-
- skip = false;
- is_root = false;
- is_prefixed = true;
-
- if (hdr.name == NULL || canonicalize_name(hdr.name) != 0) {
- fprintf(stderr, "skipping '%s' (invalid name)\n",
- hdr.name);
- skip = true;
- }
-
- if (root_becomes != NULL) {
- if (strncmp(hdr.name, root_becomes, rootlen) == 0) {
- if (hdr.name[rootlen] == '\0') {
- is_root = true;
- } else if (hdr.name[rootlen] != '/') {
- is_prefixed = false;
- }
- } else {
- is_prefixed = false;
- }
-
- if (is_prefixed && !is_root) {
- memmove(hdr.name, hdr.name + rootlen + 1,
- strlen(hdr.name + rootlen + 1) + 1);
- }
-
- if (is_prefixed && hdr.name[0] == '\0') {
- fputs("skipping entry with empty name\n",
- stderr);
- skip = true;
- }
- } else if (hdr.name[0] == '\0') {
- is_root = true;
- }
-
- if (!is_prefixed) {
- clear_header(&hdr);
- continue;
- }
-
- if (is_root) {
- if (set_root_attribs(&hdr))
- goto fail;
- clear_header(&hdr);
- continue;
- }
-
- if (!skip && hdr.unknown_record) {
- fprintf(stderr, "%s: unknown entry type\n", hdr.name);
- skip = true;
- }
-
- if (!skip && hdr.sparse != NULL) {
- offset = hdr.sparse->offset;
- count = 0;
-
- for (m = hdr.sparse; m != NULL; m = m->next) {
- if (m->offset < offset) {
- skip = true;
- break;
- }
- offset = m->offset + m->count;
- count += m->count;
- }
-
- if (count != hdr.record_size)
- skip = true;
-
- if (skip) {
- fprintf(stderr, "%s: broken sparse "
- "file layout\n", hdr.name);
- }
- }
-
- if (skip) {
- if (dont_skip)
- goto fail;
- if (skip_entry(input_file, hdr.sb.st_size))
- goto fail;
-
- clear_header(&hdr);
- continue;
- }
-
- if (create_node_and_repack_data(&hdr))
- goto fail;
-
- clear_header(&hdr);
- }
-
- return 0;
-fail:
- clear_header(&hdr);
- return -1;
-}
-
-int main(int argc, char **argv)
-{
- int status = EXIT_FAILURE;
-
- process_args(argc, argv);
-
-#ifdef _WIN32
- _setmode(_fileno(stdin), _O_BINARY);
- input_file = stdin;
-#else
- input_file = freopen(NULL, "rb", stdin);
-#endif
-
- if (input_file == NULL) {
- perror("changing stdin to binary mode");
- return EXIT_FAILURE;
- }
-
- if (sqfs_writer_init(&sqfs, &cfg))
- return EXIT_FAILURE;
-
- if (process_tar_ball())
- goto out;
-
- if (fstree_post_process(&sqfs.fs))
- goto out;
-
- if (sqfs_writer_finish(&sqfs, &cfg))
- goto out;
-
- status = EXIT_SUCCESS;
-out:
- sqfs_writer_cleanup(&sqfs, status);
- return status;
-}
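Taken together, the two deleted tools form a pipeline: sqfs2tar writes a tar stream to stdout and tar2sqfs consumes an uncompressed tar stream from stdin, so an existing image can be repacked in one go. A hypothetical example (file names invented, assuming the output file does not exist yet):

	sqfs2tar -r . old.sqfs | tar2sqfs new.sqfs

Passing -r . to sqfs2tar adds a './' entry carrying the root inode's metadata, which tar2sqfs then applies to the root of the new image, as described in the help texts above; without it, the root inode's attributes are lost in the round trip.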