author	Alexey Brodkin <abrodkin@synopsys.com>	2019-02-08 13:55:19 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-02-27 10:08:08 +0100
commit	238209c654d50e74deed8ec1fdb3ff61f4d0ea32 (patch)
tree	f7d5fb20c8fc9e4f8158b756baddf22ac4cbcd9b /arch
parent	e7264579eb80ef67efd337ffb32b7d57e7b1ea54 (diff)
ARC: define ARCH_SLAB_MINALIGN = 8
commit b6835ea77729e7faf4656ca637ba53f42b8ee3fd upstream.

The default value of ARCH_SLAB_MINALIGN in "include/linux/slab.h" is
"__alignof__(unsigned long long)", which for ARC unexpectedly turns out
to be 4. This is not a compiler bug; the alignment is defined that way
by the ARC ABI [1].

Thus the slab allocator could hand out a struct that is only 32-bit
aligned, which is generally OK even if the struct has long long members.
There was, however, a potential problem when it had any atomic64_t
members, which use the LLOCKD/SCONDD instructions that the ISA requires
to take 64-bit aligned addresses.

This is the problem we ran into:

[ 4.015732] EXT4-fs (mmcblk0p2): re-mounted. Opts: (null)
[ 4.167881] Misaligned Access
[ 4.172356] Path: /bin/busybox.nosuid
[ 4.176004] CPU: 2 PID: 171 Comm: rm Not tainted 4.19.14-yocto-standard #1
[ 4.182851]
[ 4.182851] [ECR ]: 0x000d0000 => Check Programmer's Manual
[ 4.190061] [EFA ]: 0xbeaec3fc
[ 4.190061] [BLINK ]: ext4_delete_entry+0x210/0x234
[ 4.190061] [ERET ]: ext4_delete_entry+0x13e/0x234
[ 4.202985] [STAT32]: 0x80080002 : IE K
[ 4.207236] BTA: 0x9009329c SP: 0xbe5b1ec4 FP: 0x00000000
[ 4.212790] LPS: 0x9074b118 LPE: 0x9074b120 LPC: 0x00000000
[ 4.218348] r00: 0x00000040 r01: 0x00000021 r02: 0x00000001
...
...
[ 4.270510] Stack Trace:
[ 4.274510] ext4_delete_entry+0x13e/0x234
[ 4.278695] ext4_rmdir+0xe0/0x238
[ 4.282187] vfs_rmdir+0x50/0xf0
[ 4.285492] do_rmdir+0x9e/0x154
[ 4.288802] EV_Trap+0x110/0x114

The fix is to make sure slab allocations are 64-bit aligned.

Do note that atomic64_t is __attribute__((aligned(8))), which means gcc
does generate 64-bit aligned references relative to the beginning of the
container struct. The issue, however, is that if the container itself is
not 64-bit aligned, the atomic64_t ends up unaligned; this patch ensures
the container is 64-bit aligned as well.

[1] https://github.com/foss-for-synopsys-dwc-arc-processors/toolchain/wiki/files/ARCv2_ABI.pdf

Signed-off-by: Alexey Brodkin <abrodkin@synopsys.com>
Cc: <stable@vger.kernel.org> # 4.8+
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
[vgupta: reworked changelog, added dependency on LL64+LLSC]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
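To make the failure mode concrete, a minimal hypothetical sketch follows. The
struct and function names (demo_counter, demo_alloc) are invented for
illustration and do not come from this patch; the point is that before this
change kmalloc() on ARC only had to return 4-byte aligned memory, so an
embedded atomic64_t could land on a non-64-bit boundary.

#include <linux/slab.h>		/* kmalloc(), kfree() */
#include <linux/atomic.h>	/* atomic64_t, atomic64_set(), atomic64_inc() */
#include <linux/types.h>	/* u32 */

/* Hypothetical container object handed out by the slab allocator. */
struct demo_counter {
	u32		flags;	/* offset 0 */
	atomic64_t	hits;	/* 64-bit aligned relative to the struct start */
};

static int demo_alloc(void)
{
	struct demo_counter *c;

	/*
	 * Without ARCH_SLAB_MINALIGN = 8, ARC's slab minimum alignment falls
	 * back to __alignof__(unsigned long long) == 4 (per the ARC ABI), so
	 * kmalloc() may legally return a pointer that is only 4-byte aligned.
	 * If 'c' lands on such a boundary, 'c->hits' is not 64-bit aligned.
	 */
	c = kmalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	atomic64_set(&c->hits, 0);
	atomic64_inc(&c->hits);	/* RMW op; uses LLOCKD/SCONDD on ARCv2 with
				 * LL64+LLSC, which fault on an address that is
				 * not 64-bit aligned */
	kfree(c);
	return 0;
}

With ARCH_SLAB_MINALIGN defined to 8, kmalloc() returns memory that is at
least 64-bit aligned, so 'c->hits' above is always safe to use.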
Diffstat (limited to 'arch')
-rw-r--r--	arch/arc/include/asm/cache.h	11
1 file changed, 11 insertions, 0 deletions
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index ff7d3232764a..db681cf4959c 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -52,6 +52,17 @@
#define cache_line_size() SMP_CACHE_BYTES
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
+ * alignment for any atomic64_t embedded in buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
+ * value of 4 (and not 8) in ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
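For reference, the mechanism the hunk above relies on can be summarized with
the sketch below. It paraphrases the generic fallback in include/linux/slab.h
that the changelog quotes (not a verbatim copy) and assumes the two ARC
Kconfig options used in the hunk.

/*
 * Paraphrase of the generic fallback in include/linux/slab.h: the value is
 * only derived from __alignof__ when the architecture has not already
 * provided its own ARCH_SLAB_MINALIGN.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)	/* evaluates to 4 per the ARC ABI */
#endif

Guarding the ARC override with both CONFIG_ARC_HAS_LL64 and
CONFIG_ARC_HAS_LLSC keeps the stricter minimum alignment limited to
configurations that can actually emit LLOCKD/SCONDD, matching the dependency
noted in the changelog.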