author     Linus Torvalds <torvalds@linux-foundation.org>  2023-05-03 09:38:58 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-05-03 10:37:22 -0700
commit     1dbc0a9515fdf1f0b9d6c9b1954a347c94e5f5f9 (patch)
tree       d7c7e10a98b14677622dd61309bf81cd57193841
parent     b9bd9f605c4a6f04a83e6640a7d1d6dda80f17ca (diff)
x86: mm: remove 'sign' games from LAM untagged_addr*() macros
The intent of the sign games was to not modify kernel addresses when
untagging them.  However, that had two issues:

 (a) it didn't actually work as intended, since the mask was calculated
     as 'addr >> 63' on an _unsigned_ address.  So instead of getting a
     mask of all ones for kernel addresses, you just got '1'.

 (b) untagging a kernel address isn't actually a valid operation anyway.

Now, (a) had originally been true for both 'untagged_addr()' and the
remote version of it, but had accidentally been fixed for the regular
version of untagged_addr() by commit e0bddc19ba95 ("x86/mm: Reduce
untagged_addr() overhead for systems without LAM").  That one rewrote
the shift to be part of the alternative asm code, and in the process
changed the unsigned shift into a signed 'sar' instruction.

And while it is true that we don't want to turn what looks like a
kernel address into a user address by masking off the high bit, that
doesn't need these sign masking games - all it needs is that the mm
context 'untag_mask' value has the high bit set.  Which it always does.

So simplify the code by just removing the superfluous (and in the case
of untagged_addr_remote(), still buggy) sign bit games in the address
masking.

Acked-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
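An illustrative, self-contained C sketch of issue (a) - not part of the
patch, and the address value below is just an example: a logical right
shift of an unsigned 64-bit kernel address by 63 yields 1, while the
arithmetic shift ('sar') that the fixed alternative asm relies on yields
the intended all-ones mask.

    /* Sketch only: unsigned vs. signed shift of a kernel-half address. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long kaddr = 0xffff888000001000UL;  /* example kernel address */

            unsigned long logical = kaddr >> 63;                        /* 0x1 */
            unsigned long arith   = (unsigned long)((long)kaddr >> 63); /* 0xffffffffffffffff */

            /*
             * Note: right-shifting a negative signed value is
             * implementation-defined in ISO C; gcc/clang on x86-64
             * implement it as an arithmetic shift ('sar'), which is
             * what the old inline asm depended on.
             */
            printf("logical shift:    %#lx\n", logical);
            printf("arithmetic shift: %#lx\n", arith);
            return 0;
    }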
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 18
1 file changed, 3 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 20411e69e67f..e5b23e917f41 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -15,25 +15,17 @@
#ifdef CONFIG_ADDRESS_MASKING
/*
* Mask out tag bits from the address.
- *
- * Magic with the 'sign' allows to untag userspace pointer without any branches
- * while leaving kernel addresses intact.
*/
static inline unsigned long __untagged_addr(unsigned long addr)
{
- long sign;
-
/*
* Refer tlbstate_untag_mask directly to avoid RIP-relative relocation
* in alternative instructions. The relocation gets wrong when gets
* copied to the target place.
*/
asm (ALTERNATIVE("",
- "sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
- "or %%gs:tlbstate_untag_mask, %[sign]\n\t"
- "and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
- : [addr] "+r" (addr), [sign] "=r" (sign)
- : "m" (tlbstate_untag_mask), "[sign]" (addr));
+ "and %%gs:tlbstate_untag_mask, %[addr]\n\t", X86_FEATURE_LAM)
+ : [addr] "+r" (addr) : "m" (tlbstate_untag_mask));
return addr;
}
@@ -46,12 +38,8 @@ static inline unsigned long __untagged_addr(unsigned long addr)
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
unsigned long addr)
{
- long sign = addr >> 63;
-
mmap_assert_locked(mm);
- addr &= (mm)->context.untag_mask | sign;
-
- return addr;
+ return addr & (mm)->context.untag_mask;
}
#define untagged_addr_remote(mm, addr) ({ \
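For reference, a small C sketch (values assumed here, not taken from the
patch) of why the plain AND is enough: with LAM_U57 the per-mm untag mask
is ~GENMASK(62, 57), i.e. 0x81ffffffffffffff, so bit 63 is always kept.
Masking therefore strips the tag bits from a user pointer but can never
move a kernel-half address into the user half.

    /* Sketch only: plain AND with an untag mask that keeps bit 63 set. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long untag_mask = 0x81ffffffffffffffUL; /* LAM_U57-style mask (assumed) */
            unsigned long user_ptr   = 0x3e00000012345678UL; /* tag bits within 62:57 */
            unsigned long kernel_ptr = 0xffff888000001000UL;

            printf("user:   %#lx -> %#lx\n", user_ptr,   user_ptr   & untag_mask);
            printf("kernel: %#lx -> %#lx\n", kernel_ptr, kernel_ptr & untag_mask);
            /*
             * user:   0x3e00000012345678 -> 0x12345678
             * kernel: 0xffff888000001000 -> 0x81ff888000001000 (bit 63 still set)
             */
            return 0;
    }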