author     Yunfeng Ye <yeyunfeng@huawei.com>          2021-12-09 09:42:25 +0800
committer  Catalin Marinas <catalin.marinas@arm.com>  2021-12-10 18:24:19 +0000
commit     a3a5b763410c7bceacf41a52071134d9dc26202a
tree       69da3a9f6dc45b28525dcfb61a929e419d1fe08d
parent     7afccde389dcfaca793a0d909f8cb7412e1d1dbe
arm64: mm: Rename asid2idx() to ctxid2asid()
Commit 0c8ea531b774 ("arm64: mm: Allocate ASIDs in pairs") introduced the asid2idx() and idx2asid() macros, but they have not been genuinely useful since commit f88f42f853a8 ("arm64: context: Free up kernel ASIDs if KPTI is not in use").
The code "(asid & ~ASID_MASK)" can be instead by a macro, which is the
same code with asid2idx(). So rename it to ctxid2asid() for a better
understanding.
Also add an asid2ctxid() macro, which generates a context ID from an ASID and a generation.
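
The following is a minimal standalone sketch (not kernel code) illustrating the two macros. It assumes a hypothetical fixed 16-bit ASID space; in the kernel, asid_bits is probed from the CPU and ASID_MASK/ASID_FIRST_VERSION are derived from it at boot.

/*
 * Standalone illustration of ctxid2asid()/asid2ctxid() semantics.
 * ASID_BITS = 16 is an assumption for the example only.
 */
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS          16
#define ASID_FIRST_VERSION (1UL << ASID_BITS)   /* generation increment */
#define ASID_MASK          (~0UL << ASID_BITS)  /* generation bits */

/* Low bits of a context ID are the hardware ASID... */
#define ctxid2asid(asid)        ((asid) & ~ASID_MASK)
/* ...and a context ID is the ASID tagged with its generation. */
#define asid2ctxid(asid, genid) ((asid) | (genid))

int main(void)
{
	uint64_t generation = 3 * ASID_FIRST_VERSION;  /* after three rollovers */
	uint64_t asid = 42;                            /* allocated ASID slot */
	uint64_t ctxid = asid2ctxid(asid, generation);

	printf("context id  = 0x%llx\n", (unsigned long long)ctxid);
	printf("asid part   = %llu\n", (unsigned long long)ctxid2asid(ctxid));

	/* Equivalent to the old open-coded form in new_context(). */
	printf("matches old = %d\n",
	       ctxid == (generation | (asid & ~ASID_MASK)));
	return 0;
}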
Signed-off-by: Yunfeng Ye <yeyunfeng@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Link: https://lore.kernel.org/r/c31516eb-6d15-94e0-421c-305fc010ea79@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/mm/context.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index cd72576ae2b7..bbc2708fe928 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -35,8 +35,8 @@ static unsigned long *pinned_asid_map;
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
 
 #define NUM_USER_ASIDS		ASID_FIRST_VERSION
-#define asid2idx(asid)		((asid) & ~ASID_MASK)
-#define idx2asid(idx)		asid2idx(idx)
+#define ctxid2asid(asid)	((asid) & ~ASID_MASK)
+#define asid2ctxid(asid, genid)	((asid) | (genid))
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -120,7 +120,7 @@ static void flush_context(void)
 		 */
 		if (asid == 0)
 			asid = per_cpu(reserved_asids, i);
-		__set_bit(asid2idx(asid), asid_map);
+		__set_bit(ctxid2asid(asid), asid_map);
 		per_cpu(reserved_asids, i) = asid;
 	}
 
@@ -162,7 +162,7 @@ static u64 new_context(struct mm_struct *mm)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
-		u64 newasid = generation | (asid & ~ASID_MASK);
+		u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
 
 		/*
 		 * If our current ASID was active during a rollover, we
@@ -183,7 +183,7 @@ static u64 new_context(struct mm_struct *mm)
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.
 		 */
-		if (!__test_and_set_bit(asid2idx(asid), asid_map))
+		if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
 			return newasid;
 	}
 
@@ -209,7 +209,7 @@ static u64 new_context(struct mm_struct *mm)
 set_asid:
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-	return idx2asid(asid) | generation;
+	return asid2ctxid(asid, generation);
 }
 
 void check_and_switch_context(struct mm_struct *mm)
@@ -300,13 +300,13 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
 	}
 
 	nr_pinned_asids++;
-	__set_bit(asid2idx(asid), pinned_asid_map);
+	__set_bit(ctxid2asid(asid), pinned_asid_map);
 	refcount_set(&mm->context.pinned, 1);
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
-	asid &= ~ASID_MASK;
+	asid = ctxid2asid(asid);
 
 	/* Set the equivalent of USER_ASID_BIT */
 	if (asid && arm64_kernel_unmapped_at_el0())
@@ -327,7 +327,7 @@ void arm64_mm_context_put(struct mm_struct *mm)
 	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
 
 	if (refcount_dec_and_test(&mm->context.pinned)) {
-		__clear_bit(asid2idx(asid), pinned_asid_map);
+		__clear_bit(ctxid2asid(asid), pinned_asid_map);
 		nr_pinned_asids--;
 	}