author		Christophe Leroy <christophe.leroy@csgroup.eu>	2021-06-03 08:41:38 +0000
committer	Michael Ellerman <mpe@ellerman.id.au>		2021-06-17 00:09:08 +1000
commit		7235bb3593781ed022d0714a73c2c0d8eb8a835f (patch)
tree		73e0a356232dd321befd6264fa748321f7ba3287
parent		91bb30822a2e1d7900f9f42e9e92647a9015f979 (diff)
powerpc/32s: move CTX_TO_VSID() into mmu-hash.h
In order to reuse it in switch_mmu_context(), this patch
moves the CTX_TO_VSID() macro into asm/book3s/32/mmu-hash.h.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/26b36ef2939234a04b37baf6ffe50cba81f5d1b7.1622708530.git.christophe.leroy@csgroup.eu
-rw-r--r--	arch/powerpc/include/asm/book3s/32/mmu-hash.h	10
-rw-r--r--	arch/powerpc/kvm/book3s_32_mmu_host.c		3
-rw-r--r--	arch/powerpc/mm/book3s32/mmu_context.c		13
3 files changed, 10 insertions, 16 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index cc0284bbac86..583388399b1b 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -67,6 +67,16 @@ struct ppc_bat {
 #ifndef __ASSEMBLY__
 
 /*
+ * This macro defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then hash functions will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
+
+/*
  * Hardware Page Table Entry
  * Note that the xpn and x bitfields are used only by processors that
  * support extended addressing; otherwise, those bits are reserved.
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index e8e7b2c530d1..4b3a8d80cfa3 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -353,9 +353,6 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
 
-/* From mm/mmu_context_hash32.c */
-#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
-
 int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
diff --git a/arch/powerpc/mm/book3s32/mmu_context.c b/arch/powerpc/mm/book3s32/mmu_context.c
index 218996e40a8e..c949363f315f 100644
--- a/arch/powerpc/mm/book3s32/mmu_context.c
+++ b/arch/powerpc/mm/book3s32/mmu_context.c
@@ -39,19 +39,6 @@
 #define LAST_CONTEXT	32767
 #define FIRST_CONTEXT	1
 
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs). We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table. Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- *
- *
- * CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
- *			 & 0xffffff)
- */
-
 static unsigned long next_mmu_context;
 static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
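
To see what the macro being moved actually computes, here is a minimal
standalone sketch; it is illustrative only and not part of the patch. It
lifts the CTX_TO_VSID() definition verbatim into a userspace program and
prints the VSIDs produced for one hypothetical context number across all
16 effective segment IDs, showing how the two multipliers skew the
results apart so that hash-table entries spread out.

#include <stdio.h>

/* Definition copied verbatim from the patch above. 'c' is the MMU
 * context number, 'id' is the effective segment ID: the top 4 bits of
 * the 32-bit effective address, i.e. one 256MB segment each. */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)

int main(void)
{
	unsigned long ctx = 42;	/* hypothetical context, for illustration */
	unsigned long id;

	/* Consecutive contexts differ by 897 * 16 and consecutive
	 * segments by 0x111, both masked to 24 bits, so neighbouring
	 * (ctx, id) pairs land far apart in the hash table. */
	for (id = 0; id < 16; id++)
		printf("ctx %lu, esid %2lu -> vsid 0x%06lx\n",
		       ctx, id, CTX_TO_VSID(ctx, id));

	return 0;
}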