Diffstat (limited to 'lib/sqfshelper')
-rw-r--r-- | lib/sqfshelper/data_reader.c          | 314
-rw-r--r-- | lib/sqfshelper/data_writer.c          | 528
-rw-r--r-- | lib/sqfshelper/deserialize_fstree.c   | 304
-rw-r--r-- | lib/sqfshelper/serialize_fstree.c     |  65
-rw-r--r-- | lib/sqfshelper/sqfs_reader.c          |  75
-rw-r--r-- | lib/sqfshelper/statistics.c           |  82
-rw-r--r-- | lib/sqfshelper/tree_node_from_inode.c | 167
-rw-r--r-- | lib/sqfshelper/write_dir.c            | 141
-rw-r--r-- | lib/sqfshelper/write_export_table.c   |  47
-rw-r--r-- | lib/sqfshelper/write_inode.c          | 327
-rw-r--r-- | lib/sqfshelper/write_xattr.c          | 279
-rw-r--r-- | lib/sqfshelper/xattr_reader.c         | 393
12 files changed, 2722 insertions, 0 deletions
diff --git a/lib/sqfshelper/data_reader.c b/lib/sqfshelper/data_reader.c new file mode 100644 index 0000000..4ad6266 --- /dev/null +++ b/lib/sqfshelper/data_reader.c @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * data_reader.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "data_reader.h" +#include "highlevel.h" +#include "util.h" + +#include <stdlib.h> +#include <unistd.h> +#include <string.h> +#include <stdio.h> + +struct data_reader_t { + sqfs_fragment_t *frag; + size_t num_fragments; + size_t current_frag_index; + size_t frag_used; + + off_t current_block; + + compressor_t *cmp; + size_t block_size; + int sqfsfd; + + void *block; + void *scratch; + void *frag_block; +}; + +static ssize_t read_block(data_reader_t *data, off_t offset, uint32_t size, + void *dst) +{ + bool compressed = SQFS_IS_BLOCK_COMPRESSED(size); + void *ptr = compressed ? data->scratch : dst; + ssize_t ret; + + size = SQFS_ON_DISK_BLOCK_SIZE(size); + + if (size > data->block_size) + goto fail_bs; + + if (read_data_at("reading block", offset, data->sqfsfd, ptr, size)) + return -1; + + if (compressed) { + ret = data->cmp->do_block(data->cmp, data->scratch, size, + dst, data->block_size); + if (ret <= 0) { + fputs("extracting block failed\n", stderr); + return -1; + } + size = ret; + } + + return size; +fail_bs: + fputs("found compressed block larger than block size\n", stderr); + return -1; +} + +static int precache_data_block(data_reader_t *data, off_t location, + uint32_t size) +{ + ssize_t ret; + + if (data->current_block == location) + return 0; + + ret = read_block(data, location, size, data->block); + if (ret < 0) + return -1; + + if ((size_t)ret < data->block_size) + memset((char *)data->block + ret, 0, data->block_size - ret); + + data->current_block = location; + return 0; +} + +static int precache_fragment_block(data_reader_t *data, size_t idx) +{ + ssize_t ret; + + if (idx == data->current_frag_index) + return 0; + + if (idx >= data->num_fragments) { + fputs("fragment index out of bounds\n", stderr); + return -1; + } + + ret = read_block(data, data->frag[idx].start_offset, + data->frag[idx].size, data->frag_block); + if (ret < 0) + return -1; + + data->current_frag_index = idx; + data->frag_used = ret; + return 0; +} + +data_reader_t *data_reader_create(int fd, sqfs_super_t *super, + compressor_t *cmp) +{ + data_reader_t *data = alloc_flex(sizeof(*data), super->block_size, 3); + size_t i, size; + + if (data == NULL) { + perror("creating data reader"); + return data; + } + + data->num_fragments = super->fragment_entry_count; + data->current_frag_index = super->fragment_entry_count; + data->block = (char *)data + sizeof(*data); + data->scratch = (char *)data->block + super->block_size; + data->frag_block = (char *)data->scratch + super->block_size; + data->current_block = -1; + data->sqfsfd = fd; + data->block_size = super->block_size; + data->cmp = cmp; + + if (super->fragment_entry_count == 0 || + (super->flags & SQFS_FLAG_NO_FRAGMENTS) != 0) { + return data; + } + + if (super->fragment_table_start >= super->bytes_used) { + fputs("Fragment table start is past end of file\n", stderr); + free(data); + return NULL; + } + + if (SZ_MUL_OV(sizeof(data->frag[0]), data->num_fragments, &size)) { + fputs("Too many fragments: overflow\n", stderr); + free(data); + return NULL; + } + + data->frag = sqfs_read_table(fd, cmp, size, + super->fragment_table_start, + super->directory_table_start, + super->fragment_table_start); + if 
(data->frag == NULL) { + free(data); + return NULL; + } + + for (i = 0; i < data->num_fragments; ++i) { + data->frag[i].size = le32toh(data->frag[i].size); + data->frag[i].start_offset = + le64toh(data->frag[i].start_offset); + } + + return data; +} + +void data_reader_destroy(data_reader_t *data) +{ + free(data->frag); + free(data); +} + +int data_reader_dump_file(data_reader_t *data, file_info_t *fi, int outfd, + bool allow_sparse) +{ + uint64_t filesz = fi->size; + size_t fragsz = fi->size % data->block_size; + size_t count = fi->size / data->block_size; + off_t off = fi->startblock; + size_t i, diff; + + if (fragsz != 0 && !(fi->flags & FILE_FLAG_HAS_FRAGMENT)) { + fragsz = 0; + ++count; + } + + if (allow_sparse && ftruncate(outfd, filesz)) + goto fail_sparse; + + for (i = 0; i < count; ++i) { + diff = filesz > data->block_size ? data->block_size : filesz; + filesz -= diff; + + if (SQFS_IS_SPARSE_BLOCK(fi->blocks[i].size)) { + if (allow_sparse) { + if (lseek(outfd, diff, SEEK_CUR) == (off_t)-1) + goto fail_sparse; + continue; + } + memset(data->block, 0, diff); + } else { + if (precache_data_block(data, off, fi->blocks[i].size)) + return -1; + off += SQFS_ON_DISK_BLOCK_SIZE(fi->blocks[i].size); + } + + if (write_data("writing uncompressed block", + outfd, data->block, diff)) { + return -1; + } + } + + if (fragsz > 0) { + if (precache_fragment_block(data, fi->fragment)) + return -1; + + if (fi->fragment_offset >= data->frag_used) + goto fail_range; + + if ((fi->fragment_offset + fragsz - 1) >= data->frag_used) + goto fail_range; + + if (write_data("writing uncompressed fragment", outfd, + (char *)data->frag_block + fi->fragment_offset, + fragsz)) { + return -1; + } + } + + return 0; +fail_range: + fputs("attempted to read past fragment block limits\n", stderr); + return -1; +fail_sparse: + perror("creating sparse output file"); + return -1; +} + +ssize_t data_reader_read(data_reader_t *data, file_info_t *fi, + uint64_t offset, void *buffer, size_t size) +{ + size_t i, diff, fragsz, count, total = 0; + off_t off; + char *ptr; + + /* work out block count and fragment size */ + fragsz = fi->size % data->block_size; + count = fi->size / data->block_size; + + if (fragsz != 0 && !(fi->flags & FILE_FLAG_HAS_FRAGMENT)) { + fragsz = 0; + ++count; + } + + /* work out block index and on-disk location */ + off = fi->startblock; + i = 0; + + while (offset > data->block_size && i < count) { + off += SQFS_ON_DISK_BLOCK_SIZE(fi->blocks[i++].size); + offset -= data->block_size; + } + + /* copy data from blocks */ + while (i < count && size > 0) { + diff = data->block_size - offset; + if (size < diff) + size = diff; + + if (SQFS_IS_SPARSE_BLOCK(fi->blocks[i].size)) { + memset(buffer, 0, diff); + } else { + if (precache_data_block(data, off, fi->blocks[i].size)) + return -1; + + memcpy(buffer, (char *)data->block + offset, diff); + off += SQFS_ON_DISK_BLOCK_SIZE(fi->blocks[i].size); + } + + ++i; + offset = 0; + size -= diff; + total += diff; + buffer = (char *)buffer + diff; + } + + /* copy from fragment */ + if (i == count && size > 0 && fragsz > 0) { + if (precache_fragment_block(data, fi->fragment)) + return -1; + + if (fi->fragment_offset >= data->frag_used) + goto fail_range; + + if ((fi->fragment_offset + fragsz - 1) >= data->frag_used) + goto fail_range; + + ptr = (char *)data->frag_block + fi->fragment_offset; + ptr += offset; + + if (offset >= fragsz) { + offset = 0; + size = 0; + } + + if (offset + size > fragsz) + size = fragsz - offset; + + if (size > 0) { + memcpy(buffer, ptr + offset, size); 
+ total += size; + } + } + return total; +fail_range: + fputs("attempted to read past fragment block limits\n", stderr); + return -1; +} diff --git a/lib/sqfshelper/data_writer.c b/lib/sqfshelper/data_writer.c new file mode 100644 index 0000000..d4b402b --- /dev/null +++ b/lib/sqfshelper/data_writer.c @@ -0,0 +1,528 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * data_writer.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "block_processor.h" +#include "data_writer.h" +#include "highlevel.h" +#include "util.h" + +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <stdio.h> +#include <errno.h> +#include <zlib.h> + +struct data_writer_t { + block_t *frag_block; + sqfs_fragment_t *fragments; + size_t num_fragments; + size_t max_fragments; + + size_t devblksz; + uint64_t bytes_written; + off_t start; + + block_processor_t *proc; + compressor_t *cmp; + file_info_t *list; + sqfs_super_t *super; + int outfd; +}; + +enum { + BLK_FIRST_BLOCK = BLK_USER, + BLK_LAST_BLOCK = BLK_USER << 1, + BLK_ALLIGN = BLK_USER << 2, + BLK_FRAGMENT_BLOCK = BLK_USER << 3, +}; + +static int save_position(data_writer_t *data) +{ + data->bytes_written = data->super->bytes_used; + data->start = lseek(data->outfd, 0, SEEK_CUR); + + if (data->start == (off_t)-1) { + perror("querying current position on squashfs image"); + return -1; + } + + return 0; +} + +static int restore_position(data_writer_t *data) +{ + if (lseek(data->outfd, data->start, SEEK_SET) == (off_t)-1) + goto fail_seek; + + if (ftruncate(data->outfd, data->start)) + goto fail_truncate; + + data->super->bytes_used = data->bytes_written; + return 0; +fail_seek: + perror("seeking on squashfs image after file deduplication"); + return -1; +fail_truncate: + perror("truncating squashfs image after file deduplication"); + return -1; +} + +static int allign_file(data_writer_t *data) +{ + size_t diff = data->super->bytes_used % data->devblksz; + + if (diff == 0) + return 0; + + if (padd_file(data->outfd, data->super->bytes_used, data->devblksz)) + return -1; + + data->super->bytes_used += data->devblksz - diff; + return 0; +} + +static int block_callback(void *user, block_t *blk) +{ + file_info_t *fi = blk->user; + data_writer_t *data = user; + uint64_t ref, offset; + uint32_t out; + + if (blk->flags & BLK_FIRST_BLOCK) { + if (save_position(data)) + return -1; + + if ((blk->flags & BLK_ALLIGN) && allign_file(data) != 0) + return -1; + + fi->startblock = data->super->bytes_used; + } + + if (blk->size == 0) + goto skip_sentinel; + + out = blk->size; + if (!(blk->flags & BLK_IS_COMPRESSED)) + out |= 1 << 24; + + if (blk->flags & BLK_FRAGMENT_BLOCK) { + offset = htole64(data->super->bytes_used); + data->fragments[blk->index].start_offset = offset; + data->fragments[blk->index].pad0 = 0; + data->fragments[blk->index].size = htole32(out); + + data->super->flags &= ~SQFS_FLAG_NO_FRAGMENTS; + data->super->flags |= SQFS_FLAG_ALWAYS_FRAGMENTS; + } else { + fi->blocks[blk->index].chksum = blk->checksum; + fi->blocks[blk->index].size = htole32(out); + } + + if (write_data("writing data block", data->outfd, + blk->data, blk->size)) { + return -1; + } + + data->super->bytes_used += blk->size; + +skip_sentinel: + if (blk->flags & BLK_LAST_BLOCK) { + if ((blk->flags & BLK_ALLIGN) && allign_file(data) != 0) + return -1; + + ref = find_equal_blocks(fi, data->list, + data->super->block_size); + if (ref > 0) { + fi->startblock = ref; + fi->flags |= FILE_FLAG_BLOCKS_ARE_DUPLICATE; + + if 
(restore_position(data)) + return -1; + } + } + + return 0; +} + +/*****************************************************************************/ + +static int flush_fragment_block(data_writer_t *data) +{ + size_t newsz; + void *new; + int ret; + + if (data->num_fragments == data->max_fragments) { + newsz = data->max_fragments ? data->max_fragments * 2 : 16; + new = realloc(data->fragments, + sizeof(data->fragments[0]) * newsz); + + if (new == NULL) { + perror("appending to fragment table"); + return -1; + } + + data->max_fragments = newsz; + data->fragments = new; + } + + data->frag_block->index = data->num_fragments++; + + ret = block_processor_enqueue(data->proc, data->frag_block); + data->frag_block = NULL; + return ret; +} + +static int store_fragment(data_writer_t *data, block_t *frag) +{ + file_info_t *fi = frag->user; + size_t size; + + if (data->frag_block != NULL) { + size = data->frag_block->size + frag->size; + + if (size > data->super->block_size) { + if (flush_fragment_block(data)) + goto fail; + } + } + + if (data->frag_block == NULL) { + size = sizeof(block_t) + data->super->block_size; + + data->frag_block = calloc(1, size); + if (data->frag_block == NULL) { + perror("creating fragment block"); + goto fail; + } + + data->frag_block->flags = BLK_DONT_CHECKSUM; + data->frag_block->flags |= BLK_FRAGMENT_BLOCK; + } + + fi->fragment_offset = data->frag_block->size; + fi->fragment = data->num_fragments; + + data->frag_block->flags |= (frag->flags & BLK_DONT_COMPRESS); + memcpy(data->frag_block->data + data->frag_block->size, + frag->data, frag->size); + + data->frag_block->size += frag->size; + free(frag); + return 0; +fail: + free(frag); + return -1; +} + +static bool is_zero_block(unsigned char *ptr, size_t size) +{ + return ptr[0] == 0 && memcmp(ptr, ptr + 1, size - 1) == 0; +} + +static int handle_fragment(data_writer_t *data, block_t *blk) +{ + file_info_t *fi = blk->user, *ref; + + fi->fragment_chksum = crc32(0, blk->data, blk->size); + + ref = fragment_by_chksum(fi, fi->fragment_chksum, blk->size, + data->list, data->super->block_size); + + if (ref != NULL) { + fi->fragment_offset = ref->fragment_offset; + fi->fragment = ref->fragment; + fi->flags |= FILE_FLAG_FRAGMENT_IS_DUPLICATE; + free(blk); + return 0; + } + + return store_fragment(data, blk); +} + +static int add_sentinel_block(data_writer_t *data, file_info_t *fi, + uint32_t flags) +{ + block_t *blk = calloc(1, sizeof(*blk)); + + if (blk == NULL) { + perror("creating sentinel block"); + return -1; + } + + blk->user = fi; + blk->flags = BLK_DONT_COMPRESS | BLK_DONT_CHECKSUM | flags; + + return block_processor_enqueue(data->proc, blk); +} + +int write_data_from_fd(data_writer_t *data, file_info_t *fi, + int infd, int flags) +{ + uint32_t blk_flags = BLK_FIRST_BLOCK; + uint64_t file_size = fi->size; + size_t diff, i = 0; + block_t *blk; + + if (flags & DW_DONT_COMPRESS) + blk_flags |= BLK_DONT_COMPRESS; + + if (flags & DW_ALLIGN_DEVBLK) + blk_flags |= BLK_ALLIGN; + + fi->next = data->list; + data->list = fi; + + for (; file_size > 0; file_size -= diff) { + if (file_size > data->super->block_size) { + diff = data->super->block_size; + } else { + diff = file_size; + } + + blk = create_block(fi->input_file, infd, diff, fi, blk_flags); + if (blk == NULL) + return -1; + + blk->index = i++; + + if (is_zero_block(blk->data, blk->size)) { + fi->blocks[blk->index].chksum = 0; + fi->blocks[blk->index].size = 0; + free(blk); + continue; + } + + if (diff < data->super->block_size && + !(flags & DW_DONT_FRAGMENT)) { + fi->flags |= 
FILE_FLAG_HAS_FRAGMENT; + + if (!(blk_flags & (BLK_FIRST_BLOCK | BLK_LAST_BLOCK))) { + blk_flags |= BLK_LAST_BLOCK; + + if (add_sentinel_block(data, fi, blk_flags)) { + free(blk); + return -1; + } + } + + if (handle_fragment(data, blk)) + return -1; + } else { + if (block_processor_enqueue(data->proc, blk)) + return -1; + + blk_flags &= ~BLK_FIRST_BLOCK; + } + } + + if (!(blk_flags & (BLK_FIRST_BLOCK | BLK_LAST_BLOCK))) { + blk_flags |= BLK_LAST_BLOCK; + + if (add_sentinel_block(data, fi, blk_flags)) + return -1; + } + + return 0; +} + +static int check_map_valid(const sparse_map_t *map, file_info_t *fi) +{ + const sparse_map_t *m; + uint64_t offset; + + if (map != NULL) { + offset = map->offset; + + for (m = map; m != NULL; m = m->next) { + if (m->offset < offset) + goto fail_map; + offset = m->offset + m->count; + } + + if (offset > fi->size) + goto fail_map_size; + } + + return 0; +fail_map_size: + fprintf(stderr, "%s: sparse file map spans beyond file size\n", + fi->input_file); + return -1; +fail_map: + fprintf(stderr, + "%s: sparse file map is unordered or self overlapping\n", + fi->input_file); + return -1; +} + +static int get_sparse_block(block_t *blk, file_info_t *fi, int infd, + sparse_map_t **sparse_map, uint64_t offset, + size_t diff) +{ + sparse_map_t *map = *sparse_map; + size_t start, count; + + while (map != NULL && map->offset < offset + diff) { + start = 0; + count = map->count; + + if (map->offset < offset) + count -= offset - map->offset; + + if (map->offset > offset) + start = map->offset - offset; + + if (start + count > diff) + count = diff - start; + + if (read_data(fi->input_file, infd, blk->data + start, count)) + return -1; + + map = map->next; + } + + *sparse_map = map; + return 0; +} + +int write_data_from_fd_condensed(data_writer_t *data, file_info_t *fi, + int infd, sparse_map_t *map, int flags) +{ + uint32_t blk_flags = BLK_FIRST_BLOCK; + size_t diff, i = 0; + uint64_t offset; + block_t *blk; + + if (check_map_valid(map, fi)) + return -1; + + if (flags & DW_DONT_COMPRESS) + blk_flags |= BLK_DONT_COMPRESS; + + if (flags & DW_ALLIGN_DEVBLK) + blk_flags |= BLK_ALLIGN; + + for (offset = 0; offset < fi->size; offset += diff) { + if (fi->size - offset > (uint64_t)data->super->block_size) { + diff = data->super->block_size; + } else { + diff = fi->size - offset; + } + + blk = alloc_flex(sizeof(*blk), 1, diff); + blk->size = diff; + blk->index = i++; + blk->user = fi; + blk->flags = blk_flags; + + if (get_sparse_block(blk, fi, infd, &map, offset, diff)) { + free(blk); + return -1; + } + + if (is_zero_block(blk->data, blk->size)) { + fi->blocks[blk->index].chksum = 0; + fi->blocks[blk->index].size = 0; + free(blk); + continue; + } + + if (diff < data->super->block_size && + !(flags & DW_DONT_FRAGMENT)) { + fi->flags |= FILE_FLAG_HAS_FRAGMENT; + + if (!(blk_flags & (BLK_FIRST_BLOCK | BLK_LAST_BLOCK))) { + blk_flags |= BLK_LAST_BLOCK; + + if (add_sentinel_block(data, fi, blk_flags)) { + free(blk); + return -1; + } + } + + if (handle_fragment(data, blk)) + return -1; + } else { + if (block_processor_enqueue(data->proc, blk)) + return -1; + + blk_flags &= ~BLK_FIRST_BLOCK; + } + } + + if (!(blk_flags & (BLK_FIRST_BLOCK | BLK_LAST_BLOCK))) { + blk_flags |= BLK_LAST_BLOCK; + + if (add_sentinel_block(data, fi, blk_flags)) + return -1; + } + + return 0; +} + +data_writer_t *data_writer_create(sqfs_super_t *super, compressor_t *cmp, + int outfd, size_t devblksize, + unsigned int num_jobs) +{ + data_writer_t *data = calloc(1, sizeof(*data)); + + if (data == NULL) { + 
perror("creating data writer"); + return NULL; + } + + data->proc = block_processor_create(super->block_size, cmp, num_jobs, + data, block_callback); + data->cmp = cmp; + data->super = super; + data->outfd = outfd; + data->devblksz = devblksize; + return data; +} + +void data_writer_destroy(data_writer_t *data) +{ + block_processor_destroy(data->proc); + free(data->fragments); + free(data); +} + +int data_writer_write_fragment_table(data_writer_t *data) +{ + uint64_t start; + size_t size; + int ret; + + if (data->num_fragments == 0) { + data->super->fragment_entry_count = 0; + data->super->fragment_table_start = 0xFFFFFFFFFFFFFFFFUL; + return 0; + } + + size = sizeof(data->fragments[0]) * data->num_fragments; + ret = sqfs_write_table(data->outfd, data->super, data->cmp, + data->fragments, size, &start); + if (ret) + return -1; + + data->super->fragment_entry_count = data->num_fragments; + data->super->fragment_table_start = start; + return 0; +} + +int data_writer_sync(data_writer_t *data) +{ + if (data->frag_block != NULL) { + if (flush_fragment_block(data)) + return -1; + } + + return block_processor_finish(data->proc); +} diff --git a/lib/sqfshelper/deserialize_fstree.c b/lib/sqfshelper/deserialize_fstree.c new file mode 100644 index 0000000..11670e1 --- /dev/null +++ b/lib/sqfshelper/deserialize_fstree.c @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * deserialize_fstree.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "xattr_reader.h" +#include "meta_reader.h" +#include "highlevel.h" + +#include <stdlib.h> +#include <string.h> +#include <stdio.h> + +static int should_skip(int type, int flags) +{ + switch (type) { + case SQFS_INODE_BDEV: + case SQFS_INODE_CDEV: + case SQFS_INODE_EXT_CDEV: + case SQFS_INODE_EXT_BDEV: + return (flags & RDTREE_NO_DEVICES); + case SQFS_INODE_SLINK: + case SQFS_INODE_EXT_SLINK: + return (flags & RDTREE_NO_SLINKS); + case SQFS_INODE_SOCKET: + case SQFS_INODE_EXT_SOCKET: + return(flags & RDTREE_NO_SOCKETS); + case SQFS_INODE_FIFO: + case SQFS_INODE_EXT_FIFO: + return (flags & RDTREE_NO_FIFO); + } + return 0; +} + +static int restore_xattr(xattr_reader_t *xr, fstree_t *fs, tree_node_t *node, + sqfs_inode_generic_t *inode) +{ + uint32_t idx; + + switch (inode->base.type) { + case SQFS_INODE_EXT_DIR: + idx = inode->data.dir_ext.xattr_idx; + break; + case SQFS_INODE_EXT_FILE: + idx = inode->data.file_ext.xattr_idx; + break; + case SQFS_INODE_EXT_SLINK: + idx = inode->data.slink_ext.xattr_idx; + break; + case SQFS_INODE_EXT_BDEV: + case SQFS_INODE_EXT_CDEV: + idx = inode->data.dev_ext.xattr_idx; + break; + case SQFS_INODE_EXT_FIFO: + case SQFS_INODE_EXT_SOCKET: + idx = inode->data.ipc_ext.xattr_idx; + break; + default: + return 0; + } + + return xattr_reader_restore_node(xr, fs, node, idx); +} + +static bool node_would_be_own_parent(tree_node_t *root, tree_node_t *n) +{ + while (root != NULL) { + if (root->inode_num == n->inode_num) + return true; + + root = root->parent; + } + + return false; +} + +static int fill_dir(meta_reader_t *ir, meta_reader_t *dr, tree_node_t *root, + sqfs_super_t *super, id_table_t *idtbl, fstree_t *fs, + xattr_reader_t *xr, int flags) +{ + sqfs_inode_generic_t *inode; + sqfs_dir_header_t hdr; + sqfs_dir_entry_t *ent; + tree_node_t *n, *prev; + uint64_t block_start; + size_t size, diff; + uint32_t i; + + size = root->data.dir->size; + if (size <= sizeof(hdr)) + return 0; + + block_start = root->data.dir->start_block; + block_start += 
super->directory_table_start; + + if (meta_reader_seek(dr, block_start, root->data.dir->block_offset)) + return -1; + + while (size > sizeof(hdr)) { + if (meta_reader_read_dir_header(dr, &hdr)) + return -1; + + size -= sizeof(hdr); + + for (i = 0; i <= hdr.count && size > sizeof(*ent); ++i) { + ent = meta_reader_read_dir_ent(dr); + if (ent == NULL) + return -1; + + diff = sizeof(*ent) + strlen((char *)ent->name); + if (diff > size) { + free(ent); + break; + } + size -= diff; + + if (should_skip(ent->type, flags)) { + free(ent); + continue; + } + + inode = meta_reader_read_inode(ir, super, + hdr.start_block, + ent->offset); + if (inode == NULL) { + free(ent); + return -1; + } + + n = tree_node_from_inode(inode, idtbl, + (char *)ent->name, + super->block_size); + + if (n == NULL) { + free(ent); + free(inode); + return -1; + } + + if (node_would_be_own_parent(root, n)) { + fputs("WARNING: Found a directory that " + "contains itself, skipping loop back " + "reference!\n", stderr); + free(n); + free(ent); + free(inode); + continue; + } + + if (flags & RDTREE_READ_XATTR) { + if (restore_xattr(xr, fs, n, inode)) { + free(n); + free(ent); + free(inode); + return -1; + } + } + + free(ent); + free(inode); + + n->parent = root; + n->next = root->data.dir->children; + root->data.dir->children = n; + } + } + + n = root->data.dir->children; + prev = NULL; + + while (n != NULL) { + if (S_ISDIR(n->mode)) { + if (fill_dir(ir, dr, n, super, idtbl, fs, xr, flags)) + return -1; + + if (n->data.dir->children == NULL && + (flags & RDTREE_NO_EMPTY)) { + if (prev == NULL) { + root->data.dir->children = n->next; + free(n); + n = root->data.dir->children; + } else { + prev->next = n->next; + free(n); + n = prev->next; + } + continue; + } + } + + prev = n; + n = n->next; + } + + return 0; +} + +int deserialize_fstree(fstree_t *out, sqfs_super_t *super, compressor_t *cmp, + int fd, int flags) +{ + uint64_t block_start, limit; + sqfs_inode_generic_t *root; + meta_reader_t *ir, *dr; + xattr_reader_t *xr; + id_table_t idtbl; + int status = -1; + size_t offset; + + ir = meta_reader_create(fd, cmp, super->inode_table_start, + super->directory_table_start); + if (ir == NULL) + return -1; + + limit = super->id_table_start; + if (super->export_table_start < limit) + limit = super->export_table_start; + if (super->fragment_table_start < limit) + limit = super->fragment_table_start; + + dr = meta_reader_create(fd, cmp, super->directory_table_start, limit); + if (dr == NULL) + goto out_ir; + + if (id_table_init(&idtbl)) + goto out_dr; + + if (id_table_read(&idtbl, fd, super, cmp)) + goto out_id; + + xr = xattr_reader_create(fd, super, cmp); + if (xr == NULL) + goto out_id; + + block_start = super->root_inode_ref >> 16; + offset = super->root_inode_ref & 0xFFFF; + root = meta_reader_read_inode(ir, super, block_start, offset); + if (root == NULL) + goto out_xr; + + if (root->base.type != SQFS_INODE_DIR && + root->base.type != SQFS_INODE_EXT_DIR) { + free(root); + fputs("File system root inode is not a directory inode!\n", + stderr); + goto out_xr; + } + + memset(out, 0, sizeof(*out)); + out->block_size = super->block_size; + out->defaults.st_uid = 0; + out->defaults.st_gid = 0; + out->defaults.st_mode = 0755; + out->defaults.st_mtime = super->modification_time; + + out->root = tree_node_from_inode(root, &idtbl, "", super->block_size); + + if (out->root == NULL) { + free(root); + goto out_xr; + } + + if (flags & RDTREE_READ_XATTR) { + if (str_table_init(&out->xattr_keys, + FSTREE_XATTR_KEY_BUCKETS)) { + free(root); + goto fail_fs; 
+ } + + if (str_table_init(&out->xattr_values, + FSTREE_XATTR_VALUE_BUCKETS)) { + free(root); + goto fail_fs; + } + + if (restore_xattr(xr, out, out->root, root)) { + free(root); + goto fail_fs; + } + } + + free(root); + + if (fill_dir(ir, dr, out->root, super, &idtbl, out, xr, flags)) + goto fail_fs; + + tree_node_sort_recursive(out->root); + + status = 0; +out_xr: + xattr_reader_destroy(xr); +out_id: + id_table_cleanup(&idtbl); +out_dr: + meta_reader_destroy(dr); +out_ir: + meta_reader_destroy(ir); + return status; +fail_fs: + fstree_cleanup(out); + goto out_xr; +} diff --git a/lib/sqfshelper/serialize_fstree.c b/lib/sqfshelper/serialize_fstree.c new file mode 100644 index 0000000..736744b --- /dev/null +++ b/lib/sqfshelper/serialize_fstree.c @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * serialize_fstree.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "meta_writer.h" +#include "highlevel.h" +#include "util.h" + +#include <unistd.h> +#include <stdio.h> + +int sqfs_serialize_fstree(int outfd, sqfs_super_t *super, fstree_t *fs, + compressor_t *cmp, id_table_t *idtbl) +{ + meta_writer_t *im, *dm; + uint32_t offset; + uint64_t block; + int ret = -1; + size_t i; + + im = meta_writer_create(outfd, cmp, false); + if (im == NULL) + return -1; + + dm = meta_writer_create(outfd, cmp, true); + if (dm == NULL) + goto out_im; + + for (i = 2; i < fs->inode_tbl_size; ++i) { + if (meta_writer_write_inode(fs, idtbl, im, dm, + fs->inode_table[i])) { + goto out; + } + } + + if (meta_writer_flush(im)) + goto out; + + if (meta_writer_flush(dm)) + goto out; + + super->root_inode_ref = fs->root->inode_ref; + + meta_writer_get_position(im, &block, &offset); + super->inode_table_start = super->bytes_used; + super->bytes_used += block; + + meta_writer_get_position(dm, &block, &offset); + super->directory_table_start = super->bytes_used; + super->bytes_used += block; + + if (meta_write_write_to_file(dm)) + goto out; + + ret = 0; +out: + meta_writer_destroy(dm); +out_im: + meta_writer_destroy(im); + return ret; +} diff --git a/lib/sqfshelper/sqfs_reader.c b/lib/sqfshelper/sqfs_reader.c new file mode 100644 index 0000000..197e08f --- /dev/null +++ b/lib/sqfshelper/sqfs_reader.c @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * sqfs_reader.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "highlevel.h" + +#include <string.h> +#include <unistd.h> +#include <fcntl.h> + +int sqfs_reader_open(sqfs_reader_t *rd, const char *filename, int rdtree_flags) +{ + memset(rd, 0, sizeof(*rd)); + + rd->sqfsfd = open(filename, O_RDONLY); + if (rd->sqfsfd < 0) { + perror(filename); + return -1; + } + + if (sqfs_super_read(&rd->super, rd->sqfsfd)) + goto fail_fd; + + if (!compressor_exists(rd->super.compression_id)) { + fprintf(stderr, "%s: unknown compressor used.\n", filename); + goto fail_fd; + } + + rd->cmp = compressor_create(rd->super.compression_id, false, + rd->super.block_size, NULL); + if (rd->cmp == NULL) + goto fail_fd; + + if (rd->super.flags & SQFS_FLAG_COMPRESSOR_OPTIONS) { + if (rd->cmp->read_options(rd->cmp, rd->sqfsfd)) + goto fail_cmp; + } + + if (rd->super.flags & SQFS_FLAG_NO_XATTRS) + rdtree_flags &= ~RDTREE_READ_XATTR; + + if (deserialize_fstree(&rd->fs, &rd->super, rd->cmp, rd->sqfsfd, + rdtree_flags)) { + goto fail_cmp; + } + + fstree_gen_file_list(&rd->fs); + + rd->data = data_reader_create(rd->sqfsfd, &rd->super, rd->cmp); + if (rd->data 
== NULL) + goto fail_fs; + + return 0; +fail_fs: + fstree_cleanup(&rd->fs); +fail_cmp: + rd->cmp->destroy(rd->cmp); +fail_fd: + close(rd->sqfsfd); + memset(rd, 0, sizeof(*rd)); + return -1; +} + +void sqfs_reader_close(sqfs_reader_t *rd) +{ + data_reader_destroy(rd->data); + fstree_cleanup(&rd->fs); + rd->cmp->destroy(rd->cmp); + close(rd->sqfsfd); + memset(rd, 0, sizeof(*rd)); +} diff --git a/lib/sqfshelper/statistics.c b/lib/sqfshelper/statistics.c new file mode 100644 index 0000000..33ff7cb --- /dev/null +++ b/lib/sqfshelper/statistics.c @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * statistics.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" +#include "highlevel.h" + +#include <stdio.h> + +void sqfs_print_statistics(fstree_t *fs, sqfs_super_t *super) +{ + size_t blocks_written = 0, duplicate_blocks = 0, sparse_blocks = 0; + size_t ratio, file_count = 0, file_dup_count = 0; + size_t frag_count = 0, frag_dup = 0; + size_t i, num_blocks, sparse; + uint64_t output_bytes = 0; + uint64_t input_bytes = 0; + file_info_t *fi; + bool is_dupe; + + for (fi = fs->files; fi != NULL; fi = fi->next) { + num_blocks = fi->size / fs->block_size; + is_dupe = true; + + if ((fi->size % fs->block_size) && + !(fi->flags & FILE_FLAG_HAS_FRAGMENT)) { + ++num_blocks; + } + + for (sparse = 0, i = 0; i < num_blocks; ++i) { + if (fi->blocks[i].size == 0) + sparse += 1; + } + + if (num_blocks > sparse) { + if (fi->flags & FILE_FLAG_BLOCKS_ARE_DUPLICATE) { + duplicate_blocks += num_blocks - sparse; + } else { + blocks_written += num_blocks - sparse; + is_dupe = false; + } + } + + if (fi->flags & FILE_FLAG_HAS_FRAGMENT) { + if (fi->flags & FILE_FLAG_FRAGMENT_IS_DUPLICATE) { + frag_dup += 1; + } else { + frag_count += 1; + is_dupe = false; + } + } + + if (is_dupe) + file_dup_count += 1; + + sparse_blocks += sparse; + file_count += 1; + input_bytes += fi->size; + } + + if (input_bytes > 0) { + output_bytes = super->inode_table_start - sizeof(*super); + ratio = (100 * output_bytes) / input_bytes; + } else { + ratio = 100; + } + + fputs("---------------------------------------------------\n", stdout); + printf("Input files processed: %zu\n", file_count); + printf("Files that were complete duplicates: %zu\n", file_dup_count); + printf("Data blocks actually written: %zu\n", blocks_written); + printf("Fragment blocks written: %u\n", super->fragment_entry_count); + printf("Duplicate data blocks omitted: %zu\n", duplicate_blocks); + printf("Sparse blocks omitted: %zu\n", sparse_blocks); + printf("Fragments actually written: %zu\n", frag_count); + printf("Duplicated fragments omitted: %zu\n", frag_dup); + printf("Total number of inodes: %u\n", super->inode_count); + printf("Number of unique group/user IDs: %u\n", super->id_count); + printf("Data compression ratio: %zu%%\n", ratio); +} diff --git a/lib/sqfshelper/tree_node_from_inode.c b/lib/sqfshelper/tree_node_from_inode.c new file mode 100644 index 0000000..582399c --- /dev/null +++ b/lib/sqfshelper/tree_node_from_inode.c @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * tree_node_from_inode.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "highlevel.h" + +#include <stdlib.h> +#include <string.h> +#include <stdio.h> + +static size_t compute_size(sqfs_inode_generic_t *inode, const char *name, + size_t block_size) +{ + size_t size = sizeof(tree_node_t) + strlen(name) + 1; + size_t block_count = 0; + + switch 
(inode->base.type) { + case SQFS_INODE_DIR: + case SQFS_INODE_EXT_DIR: + size += sizeof(dir_info_t); + break; + case SQFS_INODE_FILE: + size += sizeof(file_info_t); + block_count = inode->data.file.file_size / block_size; + if ((inode->data.file.file_size % block_size) != 0) + ++block_count; + break; + case SQFS_INODE_EXT_FILE: + size += sizeof(file_info_t); + block_count = inode->data.file_ext.file_size / block_size; + if ((inode->data.file_ext.file_size % block_size) != 0) + ++block_count; + break; + case SQFS_INODE_SLINK: + case SQFS_INODE_EXT_SLINK: + size += strlen(inode->slink_target) + 1; + break; + default: + break; + } + + return size + block_count * sizeof(((file_info_t *)0)->blocks[0]); +} + +static void copy_block_sizes(sqfs_inode_generic_t *inode, tree_node_t *out, + size_t block_size) +{ + size_t i, block_count = out->data.file->size / block_size; + + if ((out->data.file->size % block_size) != 0) { + if (out->data.file->fragment == 0xFFFFFFFF || + out->data.file->fragment_offset == 0xFFFFFFFF) { + ++block_count; + } else { + out->data.file->flags |= FILE_FLAG_HAS_FRAGMENT; + } + } + + out->name += block_count * sizeof(out->data.file->blocks[0]); + + for (i = 0; i < block_count; ++i) + out->data.file->blocks[i].size = inode->block_sizes[i]; +} + +tree_node_t *tree_node_from_inode(sqfs_inode_generic_t *inode, + const id_table_t *idtbl, + const char *name, + size_t block_size) +{ + tree_node_t *out; + + if (inode->base.uid_idx >= idtbl->num_ids) { + fputs("converting inode to fs tree node: UID out of range\n", + stderr); + return NULL; + } + + if (inode->base.gid_idx >= idtbl->num_ids) { + fputs("converting inode to fs tree node: GID out of range\n", + stderr); + return NULL; + } + + out = calloc(1, compute_size(inode, name, block_size)); + if (out == NULL) { + perror("converting inode to fs tree node"); + return NULL; + } + + out->uid = idtbl->ids[inode->base.uid_idx]; + out->gid = idtbl->ids[inode->base.gid_idx]; + out->mode = inode->base.mode; + out->inode_num = inode->base.inode_number; + out->mod_time = inode->base.mod_time; + out->name = (char *)out->payload; + + switch (inode->base.type) { + case SQFS_INODE_DIR: + out->data.dir = (dir_info_t *)out->payload; + out->name += sizeof(dir_info_t); + + out->data.dir->size = inode->data.dir.size; + out->data.dir->start_block = inode->data.dir.start_block; + out->data.dir->block_offset = inode->data.dir.offset; + break; + case SQFS_INODE_EXT_DIR: + out->data.dir = (dir_info_t *)out->payload; + out->name += sizeof(dir_info_t); + + out->data.dir->size = inode->data.dir_ext.size; + out->data.dir->start_block = inode->data.dir_ext.start_block; + out->data.dir->block_offset = inode->data.dir_ext.offset; + break; + case SQFS_INODE_FILE: + out->data.file = (file_info_t *)out->payload; + out->name += sizeof(file_info_t); + + out->data.file->size = inode->data.file.file_size; + out->data.file->startblock = inode->data.file.blocks_start; + out->data.file->fragment = inode->data.file.fragment_index; + out->data.file->fragment_offset = + inode->data.file.fragment_offset; + + copy_block_sizes(inode, out, block_size); + break; + case SQFS_INODE_EXT_FILE: + out->data.file = (file_info_t *)out->payload; + out->name += sizeof(file_info_t); + + out->data.file->size = inode->data.file_ext.file_size; + out->data.file->sparse = inode->data.file_ext.sparse; + out->data.file->startblock = inode->data.file_ext.blocks_start; + out->data.file->fragment = inode->data.file_ext.fragment_idx; + out->data.file->fragment_offset = + 
inode->data.file_ext.fragment_offset; + + copy_block_sizes(inode, out, block_size); + break; + case SQFS_INODE_SLINK: + case SQFS_INODE_EXT_SLINK: + out->data.slink_target = (char *)out->payload; + strcpy(out->data.slink_target, inode->slink_target); + + out->name = (char *)out->payload + + strlen(inode->slink_target) + 1; + break; + case SQFS_INODE_BDEV: + case SQFS_INODE_CDEV: + out->name = (char *)out->payload; + out->data.devno = inode->data.dev.devno; + break; + case SQFS_INODE_EXT_BDEV: + case SQFS_INODE_EXT_CDEV: + out->name = (char *)out->payload; + out->data.devno = inode->data.dev_ext.devno; + break; + default: + break; + } + + strcpy(out->name, name); + return out; +} diff --git a/lib/sqfshelper/write_dir.c b/lib/sqfshelper/write_dir.c new file mode 100644 index 0000000..23297ad --- /dev/null +++ b/lib/sqfshelper/write_dir.c @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * write_dir.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "meta_writer.h" +#include "util.h" + +#include <assert.h> +#include <endian.h> +#include <stdlib.h> +#include <string.h> +#include <stdio.h> + +static int get_type(mode_t mode) +{ + switch (mode & S_IFMT) { + case S_IFSOCK: return SQFS_INODE_SOCKET; + case S_IFIFO: return SQFS_INODE_FIFO; + case S_IFLNK: return SQFS_INODE_SLINK; + case S_IFBLK: return SQFS_INODE_BDEV; + case S_IFCHR: return SQFS_INODE_CDEV; + case S_IFDIR: return SQFS_INODE_DIR; + case S_IFREG: return SQFS_INODE_FILE; + default: + assert(0); + } +} + +static int dir_index_grow(dir_index_t **index) +{ + size_t size = sizeof(dir_index_t) + sizeof(idx_ref_t) * 10; + void *new; + + if (*index == NULL) { + new = calloc(1, size); + } else { + if ((*index)->num_nodes < (*index)->max_nodes) + return 0; + + size += sizeof(idx_ref_t) * (*index)->num_nodes; + new = realloc(*index, size); + } + + if (new == NULL) { + perror("creating directory index"); + return -1; + } + + *index = new; + (*index)->max_nodes += 10; + return 0; +} + +int meta_writer_write_dir(meta_writer_t *dm, dir_info_t *dir, + dir_index_t **index) +{ + size_t i, size, count; + sqfs_dir_header_t hdr; + sqfs_dir_entry_t ent; + tree_node_t *c, *d; + uint16_t *diff_u16; + uint32_t offset; + uint64_t block; + int32_t diff; + + c = dir->children; + dir->size = 0; + + meta_writer_get_position(dm, &dir->start_block, &dir->block_offset); + + while (c != NULL) { + meta_writer_get_position(dm, &block, &offset); + + count = 0; + size = (offset + sizeof(hdr)) % SQFS_META_BLOCK_SIZE; + + for (d = c; d != NULL; d = d->next) { + if ((d->inode_ref >> 16) != (c->inode_ref >> 16)) + break; + + diff = d->inode_num - c->inode_num; + + if (diff > 32767 || diff < -32767) + break; + + size += sizeof(ent) + strlen(c->name); + + if (count > 0 && size > SQFS_META_BLOCK_SIZE) + break; + + count += 1; + } + + if (count > SQFS_MAX_DIR_ENT) + count = SQFS_MAX_DIR_ENT; + + if (dir_index_grow(index)) + return -1; + + meta_writer_get_position(dm, &block, &offset); + + i = (*index)->num_nodes++; + (*index)->idx_nodes[i].node = c; + (*index)->idx_nodes[i].block = block; + (*index)->idx_nodes[i].index = dir->size; + + hdr.count = htole32(count - 1); + hdr.start_block = htole32(c->inode_ref >> 16); + hdr.inode_number = htole32(c->inode_num); + dir->size += sizeof(hdr); + + if (meta_writer_append(dm, &hdr, sizeof(hdr))) + return -1; + + d = c; + + for (i = 0; i < count; ++i) { + ent.inode_diff = c->inode_num - d->inode_num; + + diff_u16 = (uint16_t *)&ent.inode_diff; + *diff_u16 = 
htole16(*diff_u16); + + ent.offset = htole16(c->inode_ref & 0x0000FFFF); + ent.type = htole16(get_type(c->mode)); + ent.size = htole16(strlen(c->name) - 1); + dir->size += sizeof(ent) + strlen(c->name); + + if (meta_writer_append(dm, &ent, sizeof(ent))) + return -1; + if (meta_writer_append(dm, c->name, strlen(c->name))) + return -1; + + c = c->next; + } + } + return 0; +} diff --git a/lib/sqfshelper/write_export_table.c b/lib/sqfshelper/write_export_table.c new file mode 100644 index 0000000..e42df15 --- /dev/null +++ b/lib/sqfshelper/write_export_table.c @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * write_export_table.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "highlevel.h" +#include "util.h" + +#include <stdlib.h> +#include <stdio.h> + +int write_export_table(int outfd, fstree_t *fs, sqfs_super_t *super, + compressor_t *cmp) +{ + uint64_t *table, start; + size_t i, size; + int ret; + + if (fs->inode_tbl_size < 1) + return 0; + + table = alloc_array(sizeof(uint64_t), (fs->inode_tbl_size - 1)); + + if (table == NULL) { + perror("Allocating NFS export table"); + return -1; + } + + for (i = 1; i < fs->inode_tbl_size; ++i) { + if (fs->inode_table[i] == NULL) { + table[i - 1] = htole64(0xFFFFFFFFFFFFFFFF); + } else { + table[i - 1] = htole64(fs->inode_table[i]->inode_ref); + } + } + + size = sizeof(uint64_t) * (fs->inode_tbl_size - 1); + ret = sqfs_write_table(outfd, super, cmp, table, size, &start); + + super->export_table_start = start; + super->flags |= SQFS_FLAG_EXPORTABLE; + free(table); + return ret; +} diff --git a/lib/sqfshelper/write_inode.c b/lib/sqfshelper/write_inode.c new file mode 100644 index 0000000..1295fa9 --- /dev/null +++ b/lib/sqfshelper/write_inode.c @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * write_inode.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "meta_writer.h" +#include "util.h" + +#include <assert.h> +#include <endian.h> +#include <stdlib.h> +#include <string.h> + +static size_t hard_link_count(tree_node_t *n) +{ + size_t count; + + if (S_ISDIR(n->mode)) { + count = 2; + + for (n = n->data.dir->children; n != NULL; n = n->next) + ++count; + + return count; + } + + return 1; +} + +static int get_type(tree_node_t *node) +{ + switch (node->mode & S_IFMT) { + case S_IFSOCK: + if (node->xattr != NULL) + return SQFS_INODE_EXT_SOCKET; + return SQFS_INODE_SOCKET; + case S_IFIFO: + if (node->xattr != NULL) + return SQFS_INODE_EXT_FIFO; + return SQFS_INODE_FIFO; + case S_IFLNK: + if (node->xattr != NULL) + return SQFS_INODE_EXT_SLINK; + return SQFS_INODE_SLINK; + case S_IFBLK: + if (node->xattr != NULL) + return SQFS_INODE_EXT_BDEV; + return SQFS_INODE_BDEV; + case S_IFCHR: + if (node->xattr != NULL) + return SQFS_INODE_EXT_CDEV; + return SQFS_INODE_CDEV; + case S_IFREG: { + file_info_t *fi = node->data.file; + + if (node->xattr != NULL || fi->sparse > 0) + return SQFS_INODE_EXT_FILE; + + if (fi->startblock > 0xFFFFFFFFUL || fi->size > 0xFFFFFFFFUL) + return SQFS_INODE_EXT_FILE; + + if (hard_link_count(node) > 1) + return SQFS_INODE_EXT_FILE; + + return SQFS_INODE_FILE; + } + case S_IFDIR: { + dir_info_t *di = node->data.dir; + + if (node->xattr != NULL) + return SQFS_INODE_EXT_DIR; + + if (di->start_block > 0xFFFFFFFFUL || di->size > 0xFFFF) + return SQFS_INODE_EXT_DIR; + + return SQFS_INODE_DIR; + } + } + assert(0); +} + +static int write_file_blocks(fstree_t *fs, file_info_t *fi, 
meta_writer_t *im) +{ + uint64_t i, count = fi->size / fs->block_size; + uint32_t bs; + + if ((fi->size % fs->block_size) != 0 && + !(fi->flags & FILE_FLAG_HAS_FRAGMENT)) { + ++count; + } + + for (i = 0; i < count; ++i) { + bs = htole32(fi->blocks[i].size); + + if (meta_writer_append(im, &bs, sizeof(bs))) + return -1; + } + return 0; +} + +static int write_dir_index(dir_index_t *diridx, meta_writer_t *im) +{ + sqfs_dir_index_t idx; + size_t i; + + for (i = 0; i < diridx->num_nodes; ++i) { + idx.start_block = htole32(diridx->idx_nodes[i].block); + idx.index = htole32(diridx->idx_nodes[i].index); + idx.size = strlen(diridx->idx_nodes[i].node->name) - 1; + idx.size = htole32(idx.size); + + if (meta_writer_append(im, &idx, sizeof(idx))) + return -1; + + if (meta_writer_append(im, diridx->idx_nodes[i].node->name, + le32toh(idx.size) + 1)) { + return -1; + } + } + return 0; +} + +int meta_writer_write_inode(fstree_t *fs, id_table_t *idtbl, meta_writer_t *im, + meta_writer_t *dm, tree_node_t *node) +{ + dir_index_t *diridx = NULL; + uint16_t uid_idx, gid_idx; + sqfs_inode_t base; + uint32_t offset; + uint64_t block; + + if (id_table_id_to_index(idtbl, node->uid, &uid_idx)) + return -1; + + if (id_table_id_to_index(idtbl, node->gid, &gid_idx)) + return -1; + + meta_writer_get_position(im, &block, &offset); + node->inode_ref = (block << 16) | offset; + + if (S_ISDIR(node->mode)) { + if (meta_writer_write_dir(dm, node->data.dir, &diridx)) + return -1; + } + + base.type = htole16(get_type(node)); + base.mode = htole16(node->mode); + base.uid_idx = htole16(uid_idx); + base.gid_idx = htole16(gid_idx); + base.mod_time = htole32(node->mod_time); + base.inode_number = htole32(node->inode_num); + + if (meta_writer_append(im, &base, sizeof(base))) { + free(diridx); + return -1; + } + + switch (le16toh(base.type)) { + case SQFS_INODE_FIFO: + case SQFS_INODE_SOCKET: { + sqfs_inode_ipc_t ipc = { + .nlink = htole32(hard_link_count(node)), + }; + + return meta_writer_append(im, &ipc, sizeof(ipc)); + } + case SQFS_INODE_EXT_FIFO: + case SQFS_INODE_EXT_SOCKET: { + sqfs_inode_ipc_ext_t ipc = { + .nlink = htole32(hard_link_count(node)), + .xattr_idx = htole32(0xFFFFFFFF), + }; + + if (node->xattr != NULL) + ipc.xattr_idx = htole32(node->xattr->index); + + return meta_writer_append(im, &ipc, sizeof(ipc)); + } + case SQFS_INODE_SLINK: { + sqfs_inode_slink_t slink = { + .nlink = htole32(hard_link_count(node)), + .target_size = htole32(strlen(node->data.slink_target)), + }; + + if (meta_writer_append(im, &slink, sizeof(slink))) + return -1; + if (meta_writer_append(im, node->data.slink_target, + le32toh(slink.target_size))) { + return -1; + } + break; + } + case SQFS_INODE_EXT_SLINK: { + sqfs_inode_slink_t slink = { + .nlink = htole32(hard_link_count(node)), + .target_size = htole32(strlen(node->data.slink_target)), + }; + uint32_t xattr = htole32(0xFFFFFFFF); + + if (node->xattr != NULL) + xattr = htole32(node->xattr->index); + + if (meta_writer_append(im, &slink, sizeof(slink))) + return -1; + if (meta_writer_append(im, node->data.slink_target, + le32toh(slink.target_size))) { + return -1; + } + if (meta_writer_append(im, &xattr, sizeof(xattr))) + return -1; + break; + } + case SQFS_INODE_BDEV: + case SQFS_INODE_CDEV: { + sqfs_inode_dev_t dev = { + .nlink = htole32(hard_link_count(node)), + .devno = htole32(node->data.devno), + }; + + return meta_writer_append(im, &dev, sizeof(dev)); + } + case SQFS_INODE_EXT_BDEV: + case SQFS_INODE_EXT_CDEV: { + sqfs_inode_dev_ext_t dev = { + .nlink = htole32(hard_link_count(node)), 
+ .devno = htole32(node->data.devno), + .xattr_idx = htole32(0xFFFFFFFF), + }; + + if (node->xattr != NULL) + dev.xattr_idx = htole32(node->xattr->index); + + return meta_writer_append(im, &dev, sizeof(dev)); + } + case SQFS_INODE_EXT_FILE: { + file_info_t *fi = node->data.file; + sqfs_inode_file_ext_t ext = { + .blocks_start = htole64(fi->startblock), + .file_size = htole64(fi->size), + .sparse = htole64(fi->sparse), + .nlink = htole32(hard_link_count(node)), + .fragment_idx = htole32(0xFFFFFFFF), + .fragment_offset = htole32(0xFFFFFFFF), + .xattr_idx = htole32(0xFFFFFFFF), + }; + + if ((fi->size % fs->block_size) != 0) { + ext.fragment_idx = htole32(fi->fragment); + ext.fragment_offset = htole32(fi->fragment_offset); + } + + if (node->xattr != NULL) + ext.xattr_idx = htole32(node->xattr->index); + + if (meta_writer_append(im, &ext, sizeof(ext))) + return -1; + return write_file_blocks(fs, fi, im); + } + case SQFS_INODE_FILE: { + file_info_t *fi = node->data.file; + sqfs_inode_file_t reg = { + .blocks_start = htole32(fi->startblock), + .fragment_index = htole32(0xFFFFFFFF), + .fragment_offset = htole32(0xFFFFFFFF), + .file_size = htole32(fi->size), + }; + + if ((fi->size % fs->block_size) != 0) { + reg.fragment_index = htole32(fi->fragment); + reg.fragment_offset = htole32(fi->fragment_offset); + } + + if (meta_writer_append(im, ®, sizeof(reg))) + return -1; + return write_file_blocks(fs, fi, im); + } + case SQFS_INODE_DIR: { + sqfs_inode_dir_t dir = { + .start_block = htole32(node->data.dir->start_block), + .nlink = htole32(hard_link_count(node)), + .size = htole16(node->data.dir->size), + .offset = htole16(node->data.dir->block_offset), + .parent_inode = node->parent ? + htole32(node->parent->inode_num) : htole32(1), + }; + + return meta_writer_append(im, &dir, sizeof(dir)); + } + case SQFS_INODE_EXT_DIR: { + sqfs_inode_dir_ext_t ext = { + .nlink = htole32(hard_link_count(node)), + .size = htole32(node->data.dir->size), + .start_block = htole32(node->data.dir->start_block), + .parent_inode = node->parent ? 
+ htole32(node->parent->inode_num) : htole32(1), + .inodex_count = htole32(0), + .offset = htole16(node->data.dir->block_offset), + .xattr_idx = htole32(0xFFFFFFFF), + }; + + if (node->xattr != NULL) + ext.xattr_idx = htole32(node->xattr->index); + + if (diridx != NULL) + ext.inodex_count = htole32(diridx->num_nodes - 1); + + if (meta_writer_append(im, &ext, sizeof(ext))) { + free(diridx); + return -1; + } + + if (diridx != NULL && write_dir_index(diridx, im) != 0) { + free(diridx); + return -1; + } + + free(diridx); + break; + } + default: + assert(0); + } + return 0; +} diff --git a/lib/sqfshelper/write_xattr.c b/lib/sqfshelper/write_xattr.c new file mode 100644 index 0000000..2263fbe --- /dev/null +++ b/lib/sqfshelper/write_xattr.c @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-3.0-or-later */ +/* + * write_xattr.c + * + * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at> + */ +#include "config.h" + +#include "meta_writer.h" +#include "highlevel.h" +#include "util.h" + +#include <assert.h> +#include <stdlib.h> +#include <string.h> +#include <stdio.h> + +static int write_key(meta_writer_t *mw, const char *key, tree_xattr_t *xattr, + bool value_is_ool) +{ + sqfs_xattr_entry_t kent; + int type; + + type = sqfs_get_xattr_prefix_id(key); + if (type < 0) { + fprintf(stderr, "unsupported xattr key '%s'\n", key); + return -1; + } + + key = strchr(key, '.'); + assert(key != NULL); + ++key; + + if (value_is_ool) + type |= SQUASHFS_XATTR_FLAG_OOL; + + kent.type = htole16(type); + kent.size = htole16(strlen(key)); + + if (meta_writer_append(mw, &kent, sizeof(kent))) + return -1; + if (meta_writer_append(mw, key, strlen(key))) + return -1; + + xattr->size += sizeof(sqfs_xattr_entry_t) + strlen(key); + return 0; +} + +static int write_value(meta_writer_t *mw, const char *value, + tree_xattr_t *xattr, uint64_t *value_ref_out) +{ + sqfs_xattr_value_t vent; + uint32_t offset; + uint64_t block; + + meta_writer_get_position(mw, &block, &offset); + *value_ref_out = (block << 16) | (offset & 0xFFFF); + + vent.size = htole32(strlen(value)); + + if (meta_writer_append(mw, &vent, sizeof(vent))) + return -1; + + if (meta_writer_append(mw, value, strlen(value))) + return -1; + + xattr->size += sizeof(vent) + strlen(value); + return 0; +} + +static int write_value_ool(meta_writer_t *mw, uint64_t location, + tree_xattr_t *xattr) +{ + sqfs_xattr_value_t vent; + uint64_t ref; + + vent.size = htole32(sizeof(location)); + if (meta_writer_append(mw, &vent, sizeof(vent))) + return -1; + + ref = htole64(location); + if (meta_writer_append(mw, &ref, sizeof(ref))) + return -1; + + xattr->size += sizeof(vent) + sizeof(ref); + return 0; +} + +static bool should_store_ool(fstree_t *fs, const char *value, size_t index) +{ + size_t refcount; + + refcount = str_table_get_ref_count(&fs->xattr_values, index); + if (refcount < 2) + return false; + + /* + Storing in line needs this many bytes: refcount * len + + Storing out-of-line needs this many: len + (refcount - 1) * 8 + + Out-of-line prefereable if: + refcount * len > len + (refcount - 1) * 8 + => refcount * len - len > (refcount - 1) * 8 + => (refcount - 1) * len > (refcount - 1) * 8 + => len > 8 + + Note that this only holds iff refcount - 1 != 0, i.e. refcount > 1, + otherwise we would be dividing by 0 in the 3rd step. 
+ */ + return strlen(value) > sizeof(uint64_t); +} + +static int write_kv_pairs(fstree_t *fs, meta_writer_t *mw, tree_xattr_t *xattr, + uint64_t *ool_locations) +{ + uint32_t key_idx, val_idx; + const char *key, *value; + uint64_t ref; + size_t i; + + for (i = 0; i < xattr->num_attr; ++i) { + key_idx = xattr->attr[i].key_index; + val_idx = xattr->attr[i].value_index; + + key = str_table_get_string(&fs->xattr_keys, key_idx); + value = str_table_get_string(&fs->xattr_values, val_idx); + + if (ool_locations[val_idx] == 0xFFFFFFFFFFFFFFFF) { + if (write_key(mw, key, xattr, false)) + return -1; + + if (write_value(mw, value, xattr, &ref)) + return -1; + + if (should_store_ool(fs, value, val_idx)) + ool_locations[val_idx] = ref; + } else { + if (write_key(mw, key, xattr, true)) + return -1; + + if (write_value_ool(mw, ool_locations[val_idx], xattr)) + return -1; + } + } + + return 0; +} + +static uint64_t *create_ool_locations_table(fstree_t *fs) +{ + uint64_t *table; + size_t i; + + table = alloc_array(sizeof(uint64_t), fs->xattr_values.num_strings); + + if (table == NULL) { + perror("allocating Xattr OOL locations table"); + return NULL; + } + + for (i = 0; i < fs->xattr_values.num_strings; ++i) { + table[i] = 0xFFFFFFFFFFFFFFFFUL; + } + + return table; +} + +int write_xattr(int outfd, fstree_t *fs, sqfs_super_t *super, + compressor_t *cmp) +{ + uint64_t kv_start, id_start, block, *tbl, *ool_locations; + size_t i = 0, count = 0, blocks; + sqfs_xattr_id_table_t idtbl; + sqfs_xattr_id_t id_ent; + meta_writer_t *mw; + tree_xattr_t *it; + uint32_t offset; + + if (fs->xattr == NULL) + return 0; + + ool_locations = create_ool_locations_table(fs); + if (ool_locations == NULL) + return -1; + + mw = meta_writer_create(outfd, cmp, false); + if (mw == NULL) + goto fail_ool; + + /* write xattr key-value pairs */ + kv_start = super->bytes_used; + + for (it = fs->xattr; it != NULL; it = it->next) { + meta_writer_get_position(mw, &it->block, &it->offset); + it->size = 0; + + if (write_kv_pairs(fs, mw, it, ool_locations)) + goto fail_mw; + + ++count; + } + + if (meta_writer_flush(mw)) + goto fail_mw; + + meta_writer_get_position(mw, &block, &offset); + meta_writer_reset(mw); + + super->bytes_used += block; + + /* allocate location table */ + blocks = (count * sizeof(id_ent)) / SQFS_META_BLOCK_SIZE; + + if ((count * sizeof(id_ent)) % SQFS_META_BLOCK_SIZE) + ++blocks; + + tbl = alloc_array(sizeof(uint64_t), blocks); + + if (tbl == NULL) { + perror("generating xattr ID table"); + goto fail_mw; + } + + /* write ID table referring to key value pairs, record offsets */ + id_start = 0; + tbl[i++] = htole64(super->bytes_used); + + for (it = fs->xattr; it != NULL; it = it->next) { + id_ent.xattr = htole64((it->block << 16) | it->offset); + id_ent.count = htole32(it->num_attr); + id_ent.size = htole32(it->size); + + if (meta_writer_append(mw, &id_ent, sizeof(id_ent))) + goto fail_tbl; + + meta_writer_get_position(mw, &block, &offset); + + if (block != id_start) { + id_start = block; + tbl[i++] = htole64(super->bytes_used + id_start); + } + } + + if (meta_writer_flush(mw)) + goto fail_tbl; + + meta_writer_get_position(mw, &block, &offset); + super->bytes_used += block; + + /* write offset table */ + idtbl.xattr_table_start = htole64(kv_start); + idtbl.xattr_ids = htole32(count); + idtbl.unused = 0; + + if (write_data("writing xattr ID table", outfd, &idtbl, sizeof(idtbl))) + goto fail_tbl; + + if (write_data("writing xattr ID table", + outfd, tbl, sizeof(tbl[0]) * blocks)) { + goto fail_tbl; + } + + 
super->xattr_id_table_start = super->bytes_used;
+	super->bytes_used += sizeof(idtbl) + sizeof(tbl[0]) * blocks;
+	super->flags &= ~SQFS_FLAG_NO_XATTRS;
+
+	free(tbl);
+	meta_writer_destroy(mw);
+	free(ool_locations);
+	return 0;
+fail_tbl:
+	free(tbl);
+fail_mw:
+	meta_writer_destroy(mw);
+fail_ool:
+	free(ool_locations);
+	return -1;
+}
diff --git a/lib/sqfshelper/xattr_reader.c b/lib/sqfshelper/xattr_reader.c
new file mode 100644
index 0000000..399940f
--- /dev/null
+++ b/lib/sqfshelper/xattr_reader.c
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: GPL-3.0-or-later */
+/*
+ * xattr_reader.c
+ *
+ * Copyright (C) 2019 David Oberhollenzer <goliath@infraroot.at>
+ */
+#include "config.h"
+
+#include "xattr_reader.h"
+#include "meta_reader.h"
+#include "util.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+
+struct xattr_reader_t {
+	uint64_t xattr_start;
+
+	size_t num_id_blocks;
+	size_t num_ids;
+
+	uint64_t *id_block_starts;
+
+	meta_reader_t *idrd;
+	meta_reader_t *kvrd;
+	sqfs_super_t *super;
+};
+
+static int get_id_block_locations(xattr_reader_t *xr, int sqfsfd,
+				  sqfs_super_t *super)
+{
+	sqfs_xattr_id_table_t idtbl;
+	size_t i;
+
+	if (super->xattr_id_table_start >= super->bytes_used) {
+		fputs("xattr ID location table is after end of filesystem\n",
+		      stderr);
+		return -1;
+	}
+
+	if (read_data_at("reading xattr ID location table",
+			 super->xattr_id_table_start,
+			 sqfsfd, &idtbl, sizeof(idtbl))) {
+		return -1;
+	}
+
+	xr->xattr_start = le64toh(idtbl.xattr_table_start);
+	xr->num_ids = le32toh(idtbl.xattr_ids);
+	xr->num_id_blocks =
+		(xr->num_ids * sizeof(sqfs_xattr_id_t)) / SQFS_META_BLOCK_SIZE;
+
+	if ((xr->num_ids * sizeof(sqfs_xattr_id_t)) % SQFS_META_BLOCK_SIZE)
+		xr->num_id_blocks += 1;
+
+	xr->id_block_starts = alloc_array(sizeof(uint64_t), xr->num_id_blocks);
+	if (xr->id_block_starts == NULL) {
+		perror("allocating xattr ID location table");
+		return -1;
+	}
+
+	if (read_data_at("reading xattr ID block locations",
+			 super->xattr_id_table_start + sizeof(idtbl),
+			 sqfsfd, xr->id_block_starts,
+			 sizeof(uint64_t) * xr->num_id_blocks)) {
+		goto fail;
+	}
+
+	for (i = 0; i < xr->num_id_blocks; ++i) {
+		xr->id_block_starts[i] = le64toh(xr->id_block_starts[i]);
+
+		if (xr->id_block_starts[i] > super->bytes_used) {
+			fputs("found xattr ID block that is past "
+			      "end of filesystem\n", stderr);
+			goto fail;
+		}
+	}
+
+	return 0;
+fail:
+	free(xr->id_block_starts);
+	xr->id_block_starts = NULL;
+	return -1;
+}
+
+static int get_xattr_desc(xattr_reader_t *xr, uint32_t idx,
+			  sqfs_xattr_id_t *desc)
+{
+	size_t block, offset;
+
+	if (idx >= xr->num_ids) {
+		fprintf(stderr, "Tried to access out of bounds "
+			"xattr index: 0x%08X\n", idx);
+		return -1;
+	}
+
+	offset = (idx * sizeof(*desc)) % SQFS_META_BLOCK_SIZE;
+	block = (idx * sizeof(*desc)) / SQFS_META_BLOCK_SIZE;
+
+	if (meta_reader_seek(xr->idrd, xr->id_block_starts[block], offset))
+		return -1;
+
+	if (meta_reader_read(xr->idrd, desc, sizeof(*desc)))
+		return -1;
+
+	desc->xattr = le64toh(desc->xattr);
+	desc->count = le32toh(desc->count);
+	desc->size = le32toh(desc->size);
+
+	if ((desc->xattr & 0xFFFF) >= SQFS_META_BLOCK_SIZE) {
+		fputs("Found xattr ID record pointing outside "
+		      "metadata block\n", stderr);
+		return -1;
+	}
+
+	if ((xr->xattr_start + (desc->xattr >> 16)) >= xr->super->bytes_used) {
+		fputs("Found xattr ID record pointing past "
+		      "end of filesystem\n", stderr);
+		return -1;
+	}
+
+	return 0;
+}
+
+static sqfs_xattr_entry_t *read_key(xattr_reader_t *xr)
+{
+	sqfs_xattr_entry_t key, *out;
+	const char *prefix;
+	size_t plen, total;
+
+	if (meta_reader_read(xr->kvrd, &key, sizeof(key)))
+		return NULL;
+
+	key.type = le16toh(key.type);
+	key.size = le16toh(key.size);
+
+	prefix = sqfs_get_xattr_prefix(key.type & SQUASHFS_XATTR_PREFIX_MASK);
+	if (prefix == NULL) {
+		fprintf(stderr, "found unknown xattr type %u\n",
+			key.type & SQUASHFS_XATTR_PREFIX_MASK);
+		return NULL;
+	}
+
+	plen = strlen(prefix);
+
+	if (SZ_ADD_OV(plen, key.size, &total) || SZ_ADD_OV(total, 1, &total) ||
+	    SZ_ADD_OV(sizeof(*out), total, &total)) {
+		errno = EOVERFLOW;
+		goto fail_alloc;
+	}
+
+	out = calloc(1, total);
+	if (out == NULL) {
+		goto fail_alloc;
+	}
+
+	*out = key;
+	memcpy(out->key, prefix, plen);
+
+	if (meta_reader_read(xr->kvrd, out->key + plen, key.size)) {
+		free(out);
+		return NULL;
+	}
+
+	return out;
+fail_alloc:
+	perror("allocating xattr key");
+	return NULL;
+}
+
+static sqfs_xattr_value_t *read_value(xattr_reader_t *xr,
+				      const sqfs_xattr_entry_t *key)
+{
+	size_t offset, new_offset, size;
+	sqfs_xattr_value_t value, *out;
+	uint64_t ref, start, new_start;
+
+	if (meta_reader_read(xr->kvrd, &value, sizeof(value)))
+		return NULL;
+
+	if (key->type & SQUASHFS_XATTR_FLAG_OOL) {
+		if (meta_reader_read(xr->kvrd, &ref, sizeof(ref)))
+			return NULL;
+
+		meta_reader_get_position(xr->kvrd, &start, &offset);
+
+		new_start = xr->xattr_start + (ref >> 16);
+		new_offset = ref & 0xFFFF;
+
+		if (new_start > xr->super->bytes_used) {
+			fputs("OOL xattr reference points past end of "
+			      "filesystem\n", stderr);
+			return NULL;
+		}
+
+		if (new_offset >= SQFS_META_BLOCK_SIZE) {
+			fputs("OOL xattr reference points outside "
+			      "metadata block\n", stderr);
+			return NULL;
+		}
+
+		if (meta_reader_seek(xr->kvrd, new_start, new_offset))
+			return NULL;
+	}
+
+	value.size = le32toh(value.size);
+
+	if (SZ_ADD_OV(sizeof(*out), value.size, &size) ||
+	    SZ_ADD_OV(size, 1, &size)) {
+		errno = EOVERFLOW;
+		goto fail_alloc;
+	}
+
+	out = calloc(1, size);
+	if (out == NULL)
+		goto fail_alloc;
+
+	*out = value;
+
+	if (meta_reader_read(xr->kvrd, out->value, value.size))
+		goto fail;
+
+	if (key->type & SQUASHFS_XATTR_FLAG_OOL) {
+		if (meta_reader_seek(xr->kvrd, start, offset))
+			goto fail;
+	}
+
+	return out;
+fail_alloc:
+	perror("allocating xattr value");
+	return NULL;
+fail:
+	free(out);
+	return NULL;
+}
+
+static int restore_kv_pairs(xattr_reader_t *xr, fstree_t *fs,
+			    tree_node_t *node)
+{
+	size_t i, key_idx, val_idx;
+	sqfs_xattr_entry_t *key;
+	sqfs_xattr_value_t *val;
+	int ret;
+
+	if (meta_reader_seek(xr->kvrd, node->xattr->block,
+			     node->xattr->offset)) {
+		return -1;
+	}
+
+	for (i = 0; i < node->xattr->num_attr; ++i) {
+		key = read_key(xr);
+		if (key == NULL)
+			return -1;
+
+		val = read_value(xr, key);
+		if (val == NULL)
+			goto fail_key;
+
+		ret = str_table_get_index(&fs->xattr_keys,
+					  (const char *)key->key, &key_idx);
+		if (ret)
+			goto fail_kv;
+
+		ret = str_table_get_index(&fs->xattr_values,
+					  (const char *)val->value, &val_idx);
+		if (ret)
+			goto fail_kv;
+
+		if (sizeof(size_t) > sizeof(uint32_t)) {
+			if (key_idx > 0xFFFFFFFFUL) {
+				fputs("too many unique xattr keys\n", stderr);
+				goto fail_kv;
+			}
+
+			if (val_idx > 0xFFFFFFFFUL) {
+				fputs("too many unique xattr values\n", stderr);
+				goto fail_kv;
+			}
+		}
+
+		node->xattr->attr[i].key_index = key_idx;
+		node->xattr->attr[i].value_index = val_idx;
+
+		free(key);
+		free(val);
+	}
+
+	return 0;
+fail_kv:
+	free(val);
+fail_key:
+	free(key);
+	return -1;
+}
+
+int xattr_reader_restore_node(xattr_reader_t *xr, fstree_t *fs,
+			      tree_node_t *node, uint32_t xattr)
+{
+	sqfs_xattr_id_t desc;
+	tree_xattr_t *it;
+
+	if (xr->kvrd == NULL || xr->idrd == NULL)
+		return 0;
+
+	if (xattr == 0xFFFFFFFF)
+		return 0;
+
+	for (it = fs->xattr; it != NULL; it = it->next) {
+		if (it->index == xattr) {
+			node->xattr = it;
+			return 0;
+		}
+	}
+
+	if (get_xattr_desc(xr, xattr, &desc))
+		return -1;
+
+	node->xattr = alloc_flex(sizeof(*node->xattr),
+				 sizeof(node->xattr->attr[0]), desc.count);
+	if (node->xattr == NULL) {
+		perror("creating xattr structure");
+		return -1;
+	}
+
+	node->xattr->num_attr = desc.count;
+	node->xattr->max_attr = desc.count;
+	node->xattr->block = xr->xattr_start + (desc.xattr >> 16);
+	node->xattr->offset = desc.xattr & 0xFFFF;
+	node->xattr->size = desc.size;
+	node->xattr->index = xattr;
+	node->xattr->owner = node;
+
+	if (restore_kv_pairs(xr, fs, node)) {
+		free(node->xattr);
+		return -1;
+	}
+
+	node->xattr->next = fs->xattr;
+	fs->xattr = node->xattr;
+	return 0;
+}
+
+void xattr_reader_destroy(xattr_reader_t *xr)
+{
+	if (xr->kvrd != NULL)
+		meta_reader_destroy(xr->kvrd);
+
+	if (xr->idrd != NULL)
+		meta_reader_destroy(xr->idrd);
+
+	free(xr->id_block_starts);
+	free(xr);
+}
+
+xattr_reader_t *xattr_reader_create(int sqfsfd, sqfs_super_t *super,
+				    compressor_t *cmp)
+{
+	xattr_reader_t *xr = calloc(1, sizeof(*xr));
+
+	if (xr == NULL) {
+		perror("creating xattr reader");
+		return NULL;
+	}
+
+	if (super->flags & SQFS_FLAG_NO_XATTRS)
+		return xr;
+
+	if (super->xattr_id_table_start == 0xFFFFFFFFFFFFFFFF)
+		return xr;
+
+	if (get_id_block_locations(xr, sqfsfd, super))
+		goto fail;
+
+	xr->idrd = meta_reader_create(sqfsfd, cmp,
+				      super->id_table_start,
+				      super->bytes_used);
+	if (xr->idrd == NULL)
+		goto fail;
+
+	xr->kvrd = meta_reader_create(sqfsfd, cmp,
+				      super->id_table_start,
+				      super->bytes_used);
+	if (xr->kvrd == NULL)
+		goto fail;
+
+	xr->super = super;
+	return xr;
+fail:
+	xattr_reader_destroy(xr);
+	return NULL;
+}
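
Usage note (editorial, not part of the commit): the xattr_reader API added in the diff above is meant to be driven from the fstree deserialization path: a reader is created once per image and then asked to attach the xattr block referenced by an inode to the matching tree node. The sketch below only illustrates that call sequence under stated assumptions; the file descriptor, superblock, compressor, fstree and node are assumed to be set up by the surrounding sqfshelper code, xattr_idx stands for the xattr index read from the inode (0xFFFFFFFF meaning "no xattrs"), and attach_xattrs is a hypothetical helper name, not a function from this commit. A real caller would keep the reader alive for the whole tree instead of creating and destroying it per node.

/* Illustrative sketch only -- not code from this commit. */
#include "xattr_reader.h"

static int attach_xattrs(int fd, sqfs_super_t *super, compressor_t *cmp,
			 fstree_t *fs, tree_node_t *node, uint32_t xattr_idx)
{
	xattr_reader_t *xr;
	int ret;

	/* parses the xattr ID table; returns an "empty" reader if the
	   image was built without xattr support */
	xr = xattr_reader_create(fd, super, cmp);
	if (xr == NULL)
		return -1;

	/* no-op if the image has no xattrs or xattr_idx is 0xFFFFFFFF;
	   otherwise decodes the key/value pairs and links them into fs */
	ret = xattr_reader_restore_node(xr, fs, node, xattr_idx);

	xattr_reader_destroy(xr);
	return ret;
}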