path: root/util/cbfstool/cbfs-mkpayload.c
author     Gabe Black <gabeblack@google.com>          2014-02-21 01:01:06 -0800
committer  Marc Jones <marc.jones@se-eng.com>         2014-10-28 17:08:29 +0100
commit     845aa1416d334872b904ce8e04659511594b8c24 (patch)
tree       ad2797362c98e6bb56887c0f7501bfd1be9d49f9 /util/cbfstool/cbfs-mkpayload.c
parent     3e72ecfd3503553c6cdf3f1c56466f6524d57f1e (diff)
cbfstool: If compression fails, warn and use the uncompressed data.
The LZMA compression algorithm, currently the only one available, will fail if you ask it to write more data to the output than you've given it space for. The code that calls into LZMA allocates an output buffer the same size as the input, so if compression increases the size of the output the call will fail. The caller(s) were written to assume that the call succeeded and to check the returned length to see if the size would have increased, but that will never happen with LZMA.

Rather than try to rework the LZMA library to dynamically resize the output buffer, or try to guess the maximal size the data could expand to, this change makes the caller simply print a warning and disable compression if the call fails for any reason. This may lead to images that are larger than necessary if compression fails for some other reason and the user doesn't notice, but since compression errors were ignored entirely until very recently that will hopefully not be a problem in practice, and we should be guaranteed to at least produce a correct image.

Original-Change-Id: I5f59529c2d48e9c4c2e011018b40ec336c4fcca8
Original-Signed-off-by: Gabe Black <gabeblack@google.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/187365
Original-Reviewed-by: David Hendricks <dhendrix@chromium.org>
Original-Tested-by: Gabe Black <gabeblack@chromium.org>
Original-Commit-Queue: Gabe Black <gabeblack@chromium.org>
(cherry picked from commit b9f622a554d5fb9a9aff839c64e11acb27785f13)
Signed-off-by: Isaac Christensen <isaac.christensen@se-eng.com>
Change-Id: I5f59529c2d48e9c4c2e011018b40ec336c4fcca8
Reviewed-on: http://review.coreboot.org/6958
Tested-by: build bot (Jenkins)
Reviewed-by: Patrick Georgi <pgeorgi@google.com>
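For reference, the fallback the message describes can be sketched in isolation as below. This is a minimal, hypothetical sketch rather than cbfstool code: compress_stub() and store_segment() are stand-ins for cbfstool's compression hook and segment bookkeeping, assumed here to follow the same convention of a nonzero return on failure and an output buffer only as large as the input; the stub simply reports failure so the fallback path is exercised.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for cbfstool's compression hook: nonzero return
 * means failure, and the output buffer is only as large as the input. */
static int compress_stub(const char *in, int in_len, char *out, int *out_len)
{
	(void)in; (void)in_len; (void)out; (void)out_len;
	return -1;	/* always "fail" so the fallback path runs */
}

/* Fallback pattern from the patch: try to compress, and if the call fails
 * or the result would not be smaller, warn and store the data unchanged. */
static void store_segment(const char *in, int in_len, char *out,
			  int *stored_len, int *compression, int algo)
{
	int len;

	if (compress_stub(in, in_len, out, &len) || len >= in_len) {
		fprintf(stderr, "Compression failed or would make the data "
				"bigger - disabled.\n");
		*compression = 0;	/* store uncompressed */
		*stored_len = in_len;
		memcpy(out, in, in_len);
	} else {
		*compression = algo;	/* e.g. the LZMA algorithm id */
		*stored_len = len;
	}
}

int main(void)
{
	const char payload[] = "already-dense payload data";
	char out[sizeof(payload)];
	int stored_len, compression;

	store_segment(payload, (int)sizeof(payload), out,
		      &stored_len, &compression, /*algo=*/1);
	printf("stored %d bytes, compression=%d\n", stored_len, compression);
	return 0;
}

The point, as in the patch, is that a failed or unprofitable compression attempt no longer aborts the build; the data is written uncompressed and the segment is marked as such.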
Diffstat (limited to 'util/cbfstool/cbfs-mkpayload.c')
-rw-r--r--   util/cbfstool/cbfs-mkpayload.c   54
1 file changed, 24 insertions(+), 30 deletions(-)
diff --git a/util/cbfstool/cbfs-mkpayload.c b/util/cbfstool/cbfs-mkpayload.c
index 38cc482d22a9..d5bcca011eb4 100644
--- a/util/cbfstool/cbfs-mkpayload.c
+++ b/util/cbfstool/cbfs-mkpayload.c
@@ -206,26 +206,24 @@ int parse_elf_to_payload(const struct buffer *input,
 		segs[segments].type = PAYLOAD_SEGMENT_DATA;
 		segs[segments].load_addr = phdr[i].p_paddr;
 		segs[segments].mem_len = phdr[i].p_memsz;
-		segs[segments].compression = algo;
 		segs[segments].offset = doffset;
 
+		/* If the compression failed or made the section is larger,
+		   use the original stuff */
+
 		int len;
 		if (compress((char *)&header[phdr[i].p_offset],
-			     phdr[i].p_filesz, output->data + doffset, &len)) {
-			buffer_delete(output);
-			ret = -1;
-			goto out;
-		}
-		segs[segments].len = len;
-
-		/* If the compressed section is larger, then use the
-		   original stuff */
-
-		if ((unsigned int)len > phdr[i].p_filesz) {
+			     phdr[i].p_filesz, output->data + doffset, &len) ||
+		    (unsigned int)len > phdr[i].p_filesz) {
+			WARN("Compression failed or would make the data bigger "
+			     "- disabled.\n");
 			segs[segments].compression = 0;
 			segs[segments].len = phdr[i].p_filesz;
 			memcpy(output->data + doffset,
 			       &header[phdr[i].p_offset], phdr[i].p_filesz);
+		} else {
+			segs[segments].compression = algo;
+			segs[segments].len = len;
 		}
 
 		doffset += segs[segments].len;
@@ -275,15 +273,13 @@ int parse_flat_binary_to_payload(const struct buffer *input,
 	segs[0].mem_len = input->size;
 	segs[0].offset = doffset;
 
-	if (compress(input->data, input->size, output->data + doffset, &len)) {
-		buffer_delete(output);
-		return -1;
-	}
-	segs[0].compression = algo;
-	segs[0].len = len;
-
-	if ((unsigned int)len >= input->size) {
-		WARN("Compressing data would make it bigger - disabled.\n");
+	if (!compress(input->data, input->size, output->data + doffset, &len) &&
+	    (unsigned int)len < input->size) {
+		segs[0].compression = algo;
+		segs[0].len = len;
+	} else {
+		WARN("Compression failed or would make the data bigger "
+		     "- disabled.\n");
 		segs[0].compression = 0;
 		segs[0].len = input->size;
 		memcpy(output->data + doffset, input->data, input->size);
@@ -404,15 +400,13 @@ int parse_fv_to_payload(const struct buffer *input,
 	segs[0].mem_len = input->size;
 	segs[0].offset = doffset;
 
-	if (compress(input->data, input->size, output->data + doffset, &len)) {
-		buffer_delete(output);
-		return -1;
-	}
-	segs[0].compression = algo;
-	segs[0].len = len;
-
-	if ((unsigned int)len >= input->size) {
-		WARN("Compressing data would make it bigger - disabled.\n");
+	if (!compress(input->data, input->size, output->data + doffset, &len) &&
+	    (unsigned int)len < input->size) {
+		segs[0].compression = algo;
+		segs[0].len = len;
+	} else {
+		WARN("Compression failed or would make the data bigger "
+		     "- disabled.\n");
 		segs[0].compression = 0;
 		segs[0].len = input->size;
 		memcpy(output->data + doffset, input->data, input->size);