author    Arnd Bergmann <arnd@arndb.de>  2023-12-13 18:00:54 +0100
committer Arnd Bergmann <arnd@arndb.de>  2024-05-06 12:05:00 +0200
commit    a4184174be36369c3af8d937e165f28a43ef1e02
tree      2502518abc421253bc3f18ebd472b7cea0cbbe32
parent    4bf859076b16f1b7b096ac1f98039a362cee999e
alpha: drop pre-EV56 support
All EV4 machines are already gone, and the remaining EV5-based machines all
support the slightly more modern EV56 generation as well. Debian only
supports EV56 and later. Drop both of these and build kernels optimized for
EV56 and higher when the "generic" option is selected, tuning for an
out-of-order EV6 pipeline, same as Debian userspace.

Since this was the only supported architecture without 8-bit and 16-bit
stores, common kernel code no longer has to worry about aligning struct
members, and existing workarounds from the block and tty layers can be
removed.

The alpha memory management code no longer needs an abstraction for the
differences between EV4 and EV5+.

Link: https://lists.debian.org/debian-alpha/2023/05/msg00009.html
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Acked-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
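The struct-alignment concern comes from the missing byte/word (BWX) stores:
before EV56, a one-byte store had to be compiled as a read-modify-write of
the whole aligned quadword, so two CPUs writing adjacent bytes of the same
struct could silently lose each other's updates. A rough C rendering of
what such a store had to do (an illustrative sketch, not kernel code):

    #include <stdint.h>

    /* What a byte store looked like without BWX: a read-modify-write of
       the aligned quadword containing the byte.  If another CPU stores
       to a neighbouring byte between the load and the store here, its
       update is lost -- hence the old padding/alignment workarounds. */
    static void store_byte_no_bwx(uint8_t *p, uint8_t v)
    {
            uintptr_t a = (uintptr_t)p;
            uint64_t *q = (uint64_t *)(a & ~(uintptr_t)7);   /* ldq_u */
            unsigned shift = (a & 7) * 8;
            uint64_t word = *q;
            word &= ~((uint64_t)0xff << shift);              /* mskbl */
            word |= (uint64_t)v << shift;                    /* insbl + or */
            *q = word;                                       /* stq_u */
    }

With EV56's real stb/stw instructions each byte is written independently,
which is why the block- and tty-layer workarounds become unnecessary.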
Diffstat (limited to 'arch/alpha/include/asm')
-rw-r--r--  arch/alpha/include/asm/elf.h            |  4
-rw-r--r--  arch/alpha/include/asm/machvec.h        |  9
-rw-r--r--  arch/alpha/include/asm/mmu_context.h    | 45
-rw-r--r--  arch/alpha/include/asm/special_insns.h  |  5
-rw-r--r--  arch/alpha/include/asm/tlbflush.h       | 37
-rw-r--r--  arch/alpha/include/asm/uaccess.h        | 80
6 files changed, 7 insertions(+), 173 deletions(-)
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index e6da23f1da83..4d7c46f50382 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -133,9 +133,7 @@ extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
#define ELF_PLATFORM \
({ \
enum implver_enum i_ = implver(); \
- ( i_ == IMPLVER_EV4 ? "ev4" \
- : i_ == IMPLVER_EV5 \
- ? (amask(AMASK_BWX) ? "ev5" : "ev56") \
+ ( i_ == IMPLVER_EV5 ? "ev56" \
: amask (AMASK_CIX) ? "ev6" : "ev67"); \
})
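The ELF_PLATFORM string reaches userspace through the ELF auxiliary vector
as AT_PLATFORM, which is how the dynamic linker and tools select per-CPU
library variants. A minimal way to inspect it with glibc's standard
getauxval() API (nothing Alpha-specific assumed):

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* AT_PLATFORM carries the kernel's ELF_PLATFORM string;
               after this change an EV5-class CPU always reports "ev56". */
            const char *plat = (const char *)getauxval(AT_PLATFORM);
            printf("AT_PLATFORM: %s\n", plat ? plat : "(unset)");
            return 0;
    }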
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index 8623f995d34c..490fc880bb3f 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -72,15 +72,6 @@ struct alpha_machine_vector
int (*mv_is_ioaddr)(unsigned long);
int (*mv_is_mmio)(const volatile void __iomem *);
- void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
- struct task_struct *);
- void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
-
- void (*mv_flush_tlb_current)(struct mm_struct *);
- void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr);
-
void (*update_irq_hw)(unsigned long, unsigned long, int);
void (*ack_irq)(unsigned long);
void (*device_interrupt)(unsigned long vector);
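The deleted pointers existed only so a CONFIG_ALPHA_GENERIC kernel could
pick the EV4 or EV5 MM routines at boot; with a single implementation left,
the indirection goes away. The machine-vector pattern itself, as a
simplified sketch (names invented for the example, not the kernel's actual
declarations):

    /* A generic build dispatches through per-platform function pointers
       chosen at boot; a platform-specific build binds the same operation
       statically so the call can be inlined. */
    struct machine_vector {
            void (*flush_tlb_current)(void);
    };

    #ifdef GENERIC_BUILD
    extern struct machine_vector alpha_mv;       /* filled in at boot */
    # define flush_tlb_current()  alpha_mv.flush_tlb_current()
    #else
    void ev5_flush_tlb_current(void);
    # define flush_tlb_current()  ev5_flush_tlb_current()
    #endif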
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 29a3e3a1f02b..eee8fe836a59 100644
--- a/arch/alpha/include/asm/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
@@ -71,9 +71,7 @@ __reload_thread(struct pcb_struct *pcb)
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN (alpha_mv.max_asn)
#else
-# ifdef CONFIG_ALPHA_EV4
-# define MAX_ASN EV4_MAX_ASN
-# elif defined(CONFIG_ALPHA_EV5)
+# if defined(CONFIG_ALPHA_EV56)
# define MAX_ASN EV5_MAX_ASN
# else
# define MAX_ASN EV6_MAX_ASN
@@ -162,26 +160,6 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}
-__EXTERN_INLINE void
-ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *next)
-{
- /* As described, ASN's are broken for TLB usage. But we can
- optimize for switching between threads -- if the mm is
- unchanged from current we needn't flush. */
- /* ??? May not be needed because EV4 PALcode recognizes that
- ASN's are broken and does a tbiap itself on swpctx, under
- the "Must set ASN or flush" rule. At least this is true
- for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
- I'm going to leave this here anyway, just to Be Sure. -- r~ */
- if (prev_mm != next_mm)
- tbiap();
-
- /* Do continue to allocate ASNs, because we can still use them
- to avoid flushing the icache. */
- ev5_switch_mm(prev_mm, next_mm, next);
-}
-
extern void __load_new_mm_context(struct mm_struct *);
asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr,
long cause, struct pt_regs *regs);
@@ -209,25 +187,8 @@ ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
__load_new_mm_context(next_mm);
}
-__EXTERN_INLINE void
-ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
-{
- __load_new_mm_context(next_mm);
- tbiap();
-}
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
-# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c))
-# define activate_mm(x,y) ev4_activate_mm((x),(y))
-# else
-# define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
-# define activate_mm(x,y) ev5_activate_mm((x),(y))
-# endif
-#endif
+#define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
+#define activate_mm(x,y) ev5_activate_mm((x),(y))
#define init_new_context init_new_context
static inline int
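The surviving EV5 path relies on ASNs (address-space numbers) so that a
context switch usually needs no TLB flush at all. The recycling scheme,
sketched in simplified form (constants and names here are illustrative,
not the kernel's exact code): each context value packs a version above the
hardware ASN field, so when the ASN space wraps, one global flush (tbiap
on Alpha) retires every entry tagged with an old version's ASN.

    #include <stdint.h>

    #define ASN_BITS 8
    #define ASN_MASK ((UINT64_C(1) << ASN_BITS) - 1)

    static uint64_t cpu_last_asn = UINT64_C(1) << ASN_BITS;  /* per CPU */

    static uint64_t get_new_mm_context(void)
    {
            uint64_t next = ++cpu_last_asn;
            if ((next & ASN_MASK) == 0) {
                    /* ASNs exhausted: the increment carried into the
                       version bits.  Flush the TLB once (tbiap) and
                       continue handing out ASNs in the new version. */
                    next = ++cpu_last_asn;   /* skip ASN 0 in the sketch */
            }
            return next;
    }

    /* On switch_mm: keep the mm's ASN if its version is still current. */
    static uint64_t context_for(uint64_t mm_context)
    {
            if ((mm_context >> ASN_BITS) == (cpu_last_asn >> ASN_BITS))
                    return mm_context;       /* still valid, no flush */
            return get_new_mm_context();
    }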
diff --git a/arch/alpha/include/asm/special_insns.h b/arch/alpha/include/asm/special_insns.h
index ca2c5c30b22e..798d0bdb11f9 100644
--- a/arch/alpha/include/asm/special_insns.h
+++ b/arch/alpha/include/asm/special_insns.h
@@ -15,10 +15,7 @@ enum implver_enum {
(enum implver_enum) __implver; })
#else
/* Try to eliminate some dead code. */
-#ifdef CONFIG_ALPHA_EV4
-#define implver() IMPLVER_EV4
-#endif
-#ifdef CONFIG_ALPHA_EV5
+#ifdef CONFIG_ALPHA_EV56
#define implver() IMPLVER_EV5
#endif
#if defined(CONFIG_ALPHA_EV6)
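The point of pinning implver() to a constant in non-generic builds is dead-
code elimination: any branch comparing against a different IMPLVER value
folds away at compile time. A small self-contained illustration (the enum
mirrors the header; the rest is invented for the example):

    enum implver_enum { IMPLVER_EV4, IMPLVER_EV5, IMPLVER_EV6 };
    #define implver() IMPLVER_EV5    /* as on a CONFIG_ALPHA_EV56 build */

    static const char *platform_name(void)
    {
            /* implver() is a compile-time constant here, so the compiler
               proves the second arm unreachable and drops it entirely. */
            return implver() == IMPLVER_EV5 ? "ev56" : "ev6-or-later";
    }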
diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h
index 94dc37cf873a..ba4b359d6c39 100644
--- a/arch/alpha/include/asm/tlbflush.h
+++ b/arch/alpha/include/asm/tlbflush.h
@@ -14,16 +14,6 @@
extern void __load_new_mm_context(struct mm_struct *);
-/* Use a few helper functions to hide the ugly broken ASN
- numbers on early Alphas (ev4 and ev45). */
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current(struct mm_struct *mm)
-{
- __load_new_mm_context(mm);
- tbiap();
-}
-
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
@@ -35,19 +25,6 @@ ev5_flush_tlb_current(struct mm_struct *mm)
specific icache page. */
__EXTERN_INLINE void
-ev4_flush_tlb_current_page(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- int tbi_flag = 2;
- if (vma->vm_flags & VM_EXEC) {
- __load_new_mm_context(mm);
- tbi_flag = 3;
- }
- tbi(tbi_flag, addr);
-}
-
-__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
struct vm_area_struct *vma,
unsigned long addr)
@@ -59,18 +36,8 @@ ev5_flush_tlb_current_page(struct mm_struct * mm,
}
-#ifdef CONFIG_ALPHA_GENERIC
-# define flush_tlb_current alpha_mv.mv_flush_tlb_current
-# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define flush_tlb_current ev4_flush_tlb_current
-# define flush_tlb_current_page ev4_flush_tlb_current_page
-# else
-# define flush_tlb_current ev5_flush_tlb_current
-# define flush_tlb_current_page ev5_flush_tlb_current_page
-# endif
-#endif
+#define flush_tlb_current ev5_flush_tlb_current
+#define flush_tlb_current_page ev5_flush_tlb_current_page
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index c32c2584c0b7..ef295cbb797c 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -96,9 +96,6 @@ struct __large_struct { unsigned long buf[100]; };
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
-
#define __get_user_16(addr) \
__asm__("1: ldwu %0,%2\n" \
"2:\n" \
@@ -112,33 +109,6 @@ struct __large_struct { unsigned long buf[100]; };
EXC(1b,2b,%0,%1) \
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
- load, so we have to do a general unaligned operation. */
-
-#define __get_user_16(addr) \
-{ \
- long __gu_tmp; \
- __asm__("1: ldq_u %0,0(%3)\n" \
- "2: ldq_u %1,1(%3)\n" \
- " extwl %0,%3,%0\n" \
- " extwh %1,%3,%1\n" \
- " or %0,%1,%0\n" \
- "3:\n" \
- EXC(1b,3b,%0,%2) \
- EXC(2b,3b,%0,%2) \
- : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
- : "r"(addr), "2"(__gu_err)); \
-}
-
-#define __get_user_8(addr) \
- __asm__("1: ldq_u %0,0(%2)\n" \
- " extbl %0,%2,%0\n" \
- "2:\n" \
- EXC(1b,2b,%0,%1) \
- : "=&r"(__gu_val), "=r"(__gu_err) \
- : "r"(addr), "1"(__gu_err))
-#endif
extern void __put_user_unknown(void);
@@ -192,9 +162,6 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
-#ifdef __alpha_bwx__
-/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
-
#define __put_user_16(x, addr) \
__asm__ __volatile__("1: stw %r2,%1\n" \
"2:\n" \
@@ -208,53 +175,6 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
EXC(1b,2b,$31,%0) \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
-#else
-/* Unfortunately, we can't get an unaligned access trap for the sub-word
- write, so we have to do a general unaligned operation. */
-
-#define __put_user_16(x, addr) \
-{ \
- long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
- __asm__ __volatile__( \
- "1: ldq_u %2,1(%5)\n" \
- "2: ldq_u %1,0(%5)\n" \
- " inswh %6,%5,%4\n" \
- " inswl %6,%5,%3\n" \
- " mskwh %2,%5,%2\n" \
- " mskwl %1,%5,%1\n" \
- " or %2,%4,%2\n" \
- " or %1,%3,%1\n" \
- "3: stq_u %2,1(%5)\n" \
- "4: stq_u %1,0(%5)\n" \
- "5:\n" \
- EXC(1b,5b,$31,%0) \
- EXC(2b,5b,$31,%0) \
- EXC(3b,5b,$31,%0) \
- EXC(4b,5b,$31,%0) \
- : "=r"(__pu_err), "=&r"(__pu_tmp1), \
- "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
- "=&r"(__pu_tmp4) \
- : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
-}
-
-#define __put_user_8(x, addr) \
-{ \
- long __pu_tmp1, __pu_tmp2; \
- __asm__ __volatile__( \
- "1: ldq_u %1,0(%4)\n" \
- " insbl %3,%4,%2\n" \
- " mskbl %1,%4,%1\n" \
- " or %1,%2,%1\n" \
- "2: stq_u %1,0(%4)\n" \
- "3:\n" \
- EXC(1b,3b,$31,%0) \
- EXC(2b,3b,$31,%0) \
- : "=r"(__pu_err), \
- "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
- : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
-}
-#endif
-
/*
* Complex access routines
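For reference, the no-BWX __get_user_16 sequence deleted above
(ldq_u/extwl/extwh/or) computes roughly the following, shown here as a
self-contained little-endian C sketch with invented names:

    #include <stdint.h>

    /* C rendering of the removed no-BWX 16-bit load: fetch the aligned
       quadword(s) covering the halfword and merge the two bytes, which
       is what ldq_u + extwl/extwh + or did without ever issuing a
       sub-word memory access. */
    static uint16_t load_u16_no_bwx(const uint8_t *p)
    {
            uintptr_t a = (uintptr_t)p;
            const uint64_t *q0 =
                    (const uint64_t *)(a & ~(uintptr_t)7);        /* ldq_u 0(p) */
            const uint64_t *q1 =
                    (const uint64_t *)((a + 1) & ~(uintptr_t)7);  /* ldq_u 1(p) */
            unsigned off = a & 7;
            uint64_t lo = *q0 >> (off * 8);                       /* extwl */
            uint64_t hi = (q1 == q0) ? 0
                        : *q1 << ((8 - off) * 8);                 /* extwh */
            return (uint16_t)(lo | hi);                           /* or   */
    }

On EV56 and later the same access is a single ldwu instruction, which is
exactly what the surviving __get_user_16 definition above emits.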