author     Julius Werner <jwerner@chromium.org>       2017-02-03 12:50:03 -0800
committer  Martin Roth <martinroth@google.com>        2017-04-14 16:33:27 +0200
commit     127a79e0b6cc4d25560446efcccca8a8da15a9b4 (patch)
tree       d3c8cd8e6eb22c7abeee5314a20c77ecaf5555bf /util
parent     973104ba1f3e27eb835b7e02e8e9e6c16704d702 (diff)
cbmem: Add custom aligned memcpy() implementation
On some architectures (like AArch64), /dev/mem mappings outside of the
area marked as normal RAM use a memory type that does not support
unaligned accesses. The libc memcpy() implementation on these
architectures may not know or expect that and make an unaligned access
for certain source/dest/length alignments. Add a custom memcpy()
implementation that takes these restrictions into account and use it
anywhere we copy straight out of /dev/mem memory.

Change-Id: I03eece380a14a69d4be3805ed72fba640f6f7d9c
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/18300
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
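For readers outside the cbmem codebase, here is a minimal host-side sketch of the call pattern the commit message describes: map a window of /dev/mem read-only and copy it into ordinary heap memory with the new helper instead of libc memcpy(). The physical address, length, and error handling are illustrative placeholders, uint8_t stands in for coreboot's u8 typedef, and the mapping is done with a bare mmap() rather than cbmem's own map_memory_size() wrapper; the helper body mirrors the definition added in the diff below.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Same logic as the aligned_memcpy() added by this patch, with uint8_t in
 * place of coreboot's u8 so the sketch compiles on its own. */
static void *aligned_memcpy(void *dest, const void *src, size_t n)
{
	uint8_t *d = dest;
	const volatile uint8_t *s = src;	/* volatile to prevent optimization */

	/* Copy byte by byte until the source pointer is word-aligned. */
	while ((uintptr_t)s & (sizeof(size_t) - 1)) {
		if (n-- == 0)
			return dest;
		*d++ = *s++;
	}

	/* Bulk-copy word-sized chunks; every load from *src is now aligned. */
	while (n >= sizeof(size_t)) {
		*(size_t *)d = *(const volatile size_t *)s;
		d += sizeof(size_t);
		s += sizeof(size_t);
		n -= sizeof(size_t);
	}

	/* Copy the remaining tail bytes one at a time. */
	while (n-- > 0)
		*d++ = *s++;

	return dest;
}

int main(void)
{
	const off_t phys = 0x76000000;	/* placeholder physical address */
	const size_t len = 4096;	/* placeholder length */
	long page = sysconf(_SC_PAGESIZE);
	off_t base = phys & ~((off_t)page - 1);	/* mmap offset must be page-aligned */
	size_t slack = (size_t)(phys - base);

	int fd = open("/dev/mem", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	void *map = mmap(NULL, len + slack, PROT_READ, MAP_SHARED, fd, base);
	if (map == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	uint8_t *buf = malloc(len);
	if (!buf) {
		munmap(map, len + slack);
		close(fd);
		return 1;
	}

	/* A plain memcpy() here may issue unaligned loads, which can fault on
	 * AArch64 device-type mappings; the helper never does that on *src. */
	aligned_memcpy(buf, (uint8_t *)map + slack, len);
	fwrite(buf, 1, len, stdout);

	free(buf);
	munmap(map, len + slack);
	close(fd);
	return 0;
}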
Diffstat (limited to 'util')
-rw-r--r--  util/cbmem/cbmem.c  32
1 file changed, 31 insertions(+), 1 deletion(-)
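As a side note, here is a tiny standalone demo (hypothetical, not part of the patch) of the mask arithmetic that the head loop of the helper added in the diff below relies on: on a 64-bit build, where sizeof(size_t) is 8, the low three bits of the source address determine how many single bytes must be copied before the word-sized loop can take over.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example source addresses with different low bits set. */
	uintptr_t addrs[] = { 0x1000, 0x1001, 0x1003, 0x1007 };

	for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		/* Same mask as the helper's head loop condition. */
		size_t misalign = addrs[i] & (sizeof(size_t) - 1);
		/* Bytes copied singly before aligned word copies begin. */
		size_t head = misalign ? sizeof(size_t) - misalign : 0;

		printf("src=0x%jx: %zu head byte(s), then %zu-byte words\n",
		       (uintmax_t)addrs[i], head, sizeof(size_t));
	}
	return 0;
}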
diff --git a/util/cbmem/cbmem.c b/util/cbmem/cbmem.c
index 7b434d874c68..e60505222ab6 100644
--- a/util/cbmem/cbmem.c
+++ b/util/cbmem/cbmem.c
@@ -61,6 +61,36 @@ static uint64_t lbtable_address;
static size_t lbtable_size;
/*
+ * Some architectures map /dev/mem memory in a way that doesn't support
+ * unaligned accesses. Most normal libc memcpy()s aren't safe to use in this
+ * case, so build our own which makes sure to never do unaligned accesses on
+ * *src (*dest is fine since we never map /dev/mem for writing).
+ */
+static void *aligned_memcpy(void *dest, const void *src, size_t n)
+{
+ u8 *d = dest;
+ const volatile u8 *s = src; /* volatile to prevent optimization */
+
+ while ((uintptr_t)s & (sizeof(size_t) - 1)) {
+ if (n-- == 0)
+ return dest;
+ *d++ = *s++;
+ }
+
+ while (n >= sizeof(size_t)) {
+ *(size_t *)d = *(const volatile size_t *)s;
+ d += sizeof(size_t);
+ s += sizeof(size_t);
+ n -= sizeof(size_t);
+ }
+
+ while (n-- > 0)
+ *d++ = *s++;
+
+ return dest;
+}
+
+/*
* calculate ip checksum (16 bit quantities) on a passed in buffer. In case
* the buffer length is odd last byte is excluded from the calculation
*/
@@ -608,7 +638,7 @@ static void dump_console(void)
console_p = map_memory_size((unsigned long)console.cbmem_addr,
size + sizeof(size) + sizeof(cursor), 1);
- memcpy(console_c, console_p + 8, size);
+ aligned_memcpy(console_c, console_p + 8, size);
printf("%s\n", console_c);
if (size < cursor)