author	Patrick Georgi <pgeorgi@chromium.org>	2017-01-11 18:38:11 +0100
committer	Patrick Georgi <pgeorgi@google.com>	2017-01-13 13:50:46 +0100
commit	ecaa570b60a27d0b1d4c9742013d65591a0ecc92 (patch)
tree	e73363c9fb9de0e226bcf9b56d28d58f00ea44f0 /util
parent	c88d16baaf3e88029b40d43eb254e90613b95187 (diff)
util/cbfstool: Enable adding precompressed files to cbfs
cbfstool ... add ... -c precompression assumes that the input file was
created by cbfs-compression-tool's compress command and uses that format
to add the file with the correct metadata.

When adding the locale_*.bin files to Chrome OS images, this provides a
nice speedup (since we can parallelize the precompression and avoid
compressing everything twice) while creating a bit-identical file.

Change-Id: Iadd106672c505909528b55e2cd43c914b95b6c6d
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Reviewed-on: https://review.coreboot.org/18102
Tested-by: build bot (Jenkins)
Reviewed-by: Nico Huber <nico.h@gmx.de>
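As the cbfstool_convert_raw() hunk below shows, a precompressed input starts with an 8-byte header (two little-endian 32-bit words: the compression algorithm and the decompressed size) followed by the compressed payload. A minimal sketch of that layout; the struct and field names are illustrative assumptions, since cbfstool itself just reads the two words and copies everything after offset 8:

	#include <stdint.h>

	/* Illustrative only: cbfstool does not define such a struct; it reads
	 * the two little-endian words directly and treats everything after
	 * offset 8 as the compressed payload. */
	struct precompressed_header {
		uint32_t compression;		/* comp_algo value, little endian */
		uint32_t decompressed_size;	/* size after decompression, little endian */
		/* compressed payload follows immediately */
	};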
Diffstat (limited to 'util')
-rw-r--r--	util/cbfstool/cbfstool.c	67
1 file changed, 45 insertions(+), 22 deletions(-)
diff --git a/util/cbfstool/cbfstool.c b/util/cbfstool/cbfstool.c
index de50ff3625..9b5343eb17 100644
--- a/util/cbfstool/cbfstool.c
+++ b/util/cbfstool/cbfstool.c
@@ -79,6 +79,7 @@ static struct param {
 	bool machine_parseable;
 	int fit_empty_entries;
 	enum comp_algo compression;
+	int precompression;
 	enum vb2_hash_algorithm hash;
 	/* for linux payloads */
 	char *initrd;
@@ -439,33 +440,51 @@ static int cbfstool_convert_raw(struct buffer *buffer,
 	unused uint32_t *offset, struct cbfs_file *header)
 {
 	char *compressed;
-	int compressed_size;
-
-	comp_func_ptr compress = compression_function(param.compression);
-	if (!compress)
-		return -1;
-	compressed = calloc(buffer->size, 1);
-
-	if (compress(buffer->data, buffer->size,
-		     compressed, &compressed_size)) {
-		WARN("Compression failed - disabled\n");
+	int decompressed_size, compressed_size;
+	comp_func_ptr compress;
+
+	decompressed_size = buffer->size;
+	if (param.precompression) {
+		param.compression = le32toh(((uint32_t *)buffer->data)[0]);
+		decompressed_size = le32toh(((uint32_t *)buffer->data)[1]);
+		compressed_size = buffer->size - 8;
+		compressed = malloc(compressed_size);
+		if (!compressed)
+			return -1;
+		memcpy(compressed, buffer->data + 8, compressed_size);
 	} else {
-		struct cbfs_file_attr_compression *attrs =
-			(struct cbfs_file_attr_compression *)
-			cbfs_add_file_attr(header,
-				CBFS_FILE_ATTR_TAG_COMPRESSION,
-				sizeof(struct cbfs_file_attr_compression));
-		if (attrs == NULL)
+		compress = compression_function(param.compression);
+		if (!compress)
+			return -1;
+		compressed = calloc(buffer->size, 1);
+		if (!compressed)
 			return -1;
-		attrs->compression = htonl(param.compression);
-		attrs->decompressed_size = htonl(buffer->size);
-		free(buffer->data);
-		buffer->data = compressed;
-		buffer->size = compressed_size;
+		if (compress(buffer->data, buffer->size,
+			     compressed, &compressed_size)) {
+			WARN("Compression failed - disabled\n");
+			free(compressed);
+			return 0;
+		}
+	}
-		header->len = htonl(buffer->size);
+	struct cbfs_file_attr_compression *attrs =
+		(struct cbfs_file_attr_compression *)
+		cbfs_add_file_attr(header,
+			CBFS_FILE_ATTR_TAG_COMPRESSION,
+			sizeof(struct cbfs_file_attr_compression));
+	if (attrs == NULL) {
+		free(compressed);
+		return -1;
+	}
+	attrs->compression = htonl(param.compression);
+	attrs->decompressed_size = htonl(decompressed_size);
+
+	free(buffer->data);
+	buffer->data = compressed;
+	buffer->size = compressed_size;
+
+	header->len = htonl(buffer->size);
 	return 0;
 }
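For context, a hedged sketch of the producer side of this format: a writer that emits the 8-byte header consumed by the precompression branch above. In the real workflow, cbfs-compression-tool's compress command produces this file; the function name and parameters here are assumptions for illustration, using glibc's <endian.h> for htole32():

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative producer: write <algo LE32><decompressed size LE32><payload>,
	 * the layout that the precompression branch above parses. Not coreboot code. */
	static int write_precompressed(FILE *out, uint32_t algo,
				       const void *payload, size_t compressed_size,
				       uint32_t decompressed_size)
	{
		uint32_t hdr[2] = { htole32(algo), htole32(decompressed_size) };

		if (fwrite(hdr, sizeof(hdr), 1, out) != 1)
			return -1;
		if (compressed_size && fwrite(payload, compressed_size, 1, out) != 1)
			return -1;
		return 0;
	}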
@@ -1347,6 +1366,10 @@ int main(int argc, char **argv)
 						optarg);
 				break;
 			case 'c': {
+				if (strcmp(optarg, "precompression") == 0) {
+					param.precompression = 1;
+					break;
+				}
 				int algo = cbfs_parse_comp_algo(optarg);
 				if (algo >= 0)
 					param.compression = algo;
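With this option handling in place, -c precompression short-circuits the normal algorithm lookup and only flags the add path to take the header-parsing branch shown earlier. A small, hedged round-trip check of that 8-byte header encoding (standalone test code, not part of coreboot; assumes glibc's <endian.h> and that LZMA is algorithm value 1, as in coreboot's CBFS headers):

	#include <assert.h>
	#include <endian.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		/* Encode the header the way a producer would... */
		uint32_t hdr[2] = { htole32(1 /* LZMA, per coreboot's comp_algo values */),
				    htole32(4096) };
		uint8_t blob[8];
		uint32_t algo, decompressed_size;

		memcpy(blob, hdr, sizeof(hdr));

		/* ...and decode it as the precompression branch does; memcpy is
		 * used here to stay alignment-safe in a standalone test. */
		memcpy(&algo, blob, sizeof(algo));
		memcpy(&decompressed_size, blob + 4, sizeof(decompressed_size));
		assert(le32toh(algo) == 1);
		assert(le32toh(decompressed_size) == 4096);
		return 0;
	}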