summary refs log tree commit diff stats
path: root/src/commonlib
diff options
context:
space:
mode:
authorJulius Werner <jwerner@chromium.org>2024-01-30 16:51:05 -0800
committerJulius Werner <jwerner@chromium.org>2024-02-02 22:48:27 +0000
commitde37109767b6b415778f34cbac196c8418f7e371 (patch)
tree44ede1025fd6058c09cc99c8e7d7122a64203641 /src/commonlib
parent416cc665929e4e66bcab3e395daa031401a61fe8 (diff)
downloadcoreboot-de37109767b6b415778f34cbac196c8418f7e371.tar.gz
coreboot-de37109767b6b415778f34cbac196c8418f7e371.tar.bz2
coreboot-de37109767b6b415778f34cbac196c8418f7e371.zip
lib: Move IP checksum to commonlib
This patch moves the IP checksum algorithm into commonlib to prepare for it being shared with libpayload. The current implementation is ancient and pretty hard to read (and does some unnecessary questionable things like the type-punning stuff which leads to suboptimal code generation), so this reimplements it from scratch (that also helps with the licensing). This algorithm is prepared to take in a pre-calculated "wide" checksum in a machine-register-sized data type which is then narrowed down to 16 bits (see RFC 1071 for why that's valid). This isn't used yet (and the code will get optimized out), but will be used later in this patch series for architecture-specific optimization. Change-Id: Ic04c714c00439a17fc04a8a6e730cc2aa19b8e68 Signed-off-by: Julius Werner <jwerner@chromium.org> Reviewed-on: https://review.coreboot.org/c/coreboot/+/80251 Reviewed-by: Yidi Lin <yidilin@google.com> Tested-by: build bot (Jenkins) <no-reply@coreboot.org> Reviewed-by: Jakub Czapiga <czapiga@google.com>
Diffstat (limited to 'src/commonlib')
-rw-r--r--src/commonlib/Makefile.mk2
-rw-r--r--src/commonlib/bsd/include/commonlib/bsd/ipchksum.h12
-rw-r--r--src/commonlib/bsd/ipchksum.c52
3 files changed, 66 insertions, 0 deletions
diff --git a/src/commonlib/Makefile.mk b/src/commonlib/Makefile.mk
index 70e731df354d..7ec4de91c013 100644
--- a/src/commonlib/Makefile.mk
+++ b/src/commonlib/Makefile.mk
@@ -61,3 +61,5 @@ smm-y += bsd/elog.c
decompressor-y += bsd/gcd.c
all-y += bsd/gcd.c
+
+all-y += bsd/ipchksum.c
diff --git a/src/commonlib/bsd/include/commonlib/bsd/ipchksum.h b/src/commonlib/bsd/include/commonlib/bsd/ipchksum.h
new file mode 100644
index 000000000000..91d6872d9f01
--- /dev/null
+++ b/src/commonlib/bsd/include/commonlib/bsd/ipchksum.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef _COMMONLIB_BSD_IPCHKSUM_H_
+#define _COMMONLIB_BSD_IPCHKSUM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+uint16_t ipchksum(const void *data, size_t size);
+uint16_t ipchksum_add(size_t offset, uint16_t first, uint16_t second);
+
+#endif /* _COMMONLIB_BSD_IPCHKSUM_H_ */
diff --git a/src/commonlib/bsd/ipchksum.c b/src/commonlib/bsd/ipchksum.c
new file mode 100644
index 000000000000..a40b86cbb40b
--- /dev/null
+++ b/src/commonlib/bsd/ipchksum.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+
+#include <commonlib/bsd/ipchksum.h>
+
/*
 * Compute the 16-bit one's-complement "IP" checksum over `size` bytes at
 * `data`.  RFC 1071 shows the sum may be accumulated in any word size and
 * folded down afterwards, and that the result is endianness-independent.
 */
uint16_t ipchksum(const void *data, size_t size)
{
	const uint8_t *bytes = data;
	/* Hook for an architecture-specific pre-computed wide sum (RFC 1071
	   permits summing in a machine-register-sized accumulator first).
	   Always 0 for now, so the folding loop below is optimized out. */
	unsigned long wide_sum = 0;
	uint32_t acc = 0;
	size_t idx;

	/* Fold the wide pre-sum down into the 32-bit accumulator. */
	for (; wide_sum != 0; wide_sum >>= 16)
		acc += wide_sum & 0xFFFF;
	acc = (acc & 0xFFFF) + (acc >> 16);

	for (idx = 0; idx < size; idx++) {
		/* Even-indexed bytes occupy the low half of each 16-bit
		   chunk, odd-indexed bytes the high half. */
		uint32_t val = bytes[idx];
		if (idx & 1)
			val <<= 8;
		acc += val;

		/* Folding on every iteration (instead of only on carry)
		   was measured to be faster. */
		acc = (acc & 0xFFFF) + (acc >> 16);
	}

	return (uint16_t)~acc;
}
+
/*
 * Combine two ipchksum() results into the checksum of the concatenated
 * data streams.  `offset` is the byte position at which the data covered
 * by `second` would begin within the combined stream.
 */
uint16_t ipchksum_add(size_t offset, uint16_t first, uint16_t second)
{
	/* Undo the final inversions to recover the raw one's-complement sums. */
	first = ~first;
	second = ~second;

	/*
	 * Checksums are accumulated in 16-bit chunks.  An odd `offset` means
	 * the second stream begins in the middle of a chunk, so its bytes sit
	 * in the opposite halves of each 16-bit word relative to where they
	 * would land in the combined stream; swap them back before adding.
	 */
	if (offset & 1)
		second = (uint16_t)((second << 8) | (second >> 8));

	uint32_t combined = (uint32_t)first + second;
	combined = (combined & 0xFFFF) + (combined >> 16);

	return (uint16_t)~combined;
}