author    Ralf Baechle <ralf@linux-mips.org>  2008-09-16 19:48:51 +0200
committer Ralf Baechle <ralf@linux-mips.org>  2008-10-11 16:18:52 +0100
commit    384740dc49ea651ba350704d13ff6be9976e37fe (patch)
tree      a6e80cad287ccae7a86d81bfa692fc96889c88ed /arch/mips/include/asm/mach-generic/ide.h
parent    e8c7c482347574ecdd45c43e32c332d5fc2ece61 (diff)
MIPS: Move header files to new location below arch/mips/include
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/include/asm/mach-generic/ide.h')
-rw-r--r--  arch/mips/include/asm/mach-generic/ide.h  |  167
1 file changed, 167 insertions, 0 deletions
diff --git a/arch/mips/include/asm/mach-generic/ide.h b/arch/mips/include/asm/mach-generic/ide.h
new file mode 100644
index 000000000000..73008f7bdc93
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/ide.h
@@ -0,0 +1,167 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ *
+ * Copied from i386; many of the older MIPS or ISA-based platforms are
+ * basically identical.  Using this file probably implies i8259 PIC
+ * support in a system, but at the very least interrupt numbers 0 - 15
+ * need to be set aside for legacy devices.
+ */
+#ifndef __ASM_MACH_GENERIC_IDE_H
+#define __ASM_MACH_GENERIC_IDE_H
+
+#ifdef __KERNEL__
+
+#include <linux/pci.h>
+#include <linux/stddef.h>
+#include <asm/processor.h>
+
+static __inline__ int ide_probe_legacy(void)
+{
+#ifdef CONFIG_PCI
+ struct pci_dev *dev;
+ /*
+ * This can be called on the ide_setup() path, super-early in
+ * boot. But the down_read() will enable local interrupts,
+ * which can cause some machines to crash. So here we detect
+ * and flag that situation and bail out early.
+ */
+ if (no_pci_devices())
+ return 0;
+ dev = pci_get_class(PCI_CLASS_BRIDGE_EISA << 8, NULL);
+ if (dev)
+ goto found;
+ dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+ if (dev)
+ goto found;
+ return 0;
+found:
+ pci_dev_put(dev);
+ return 1;
+#elif defined(CONFIG_EISA) || defined(CONFIG_ISA)
+ return 1;
+#else
+ return 0;
+#endif
+}
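
Note that pci_get_class() returns a reference-counted struct pci_dev, so a
successful hit is released again with pci_dev_put() once the mere presence of
an (E)ISA bridge has been established. Below is a minimal, hypothetical sketch
of how a caller might gate legacy port probing on this helper; the init
function and the registration step are assumptions, not part of this header.

/* Hypothetical caller: skip legacy 0x1f0/0x170 probing on systems
 * without an ISA/EISA bridge (sketch only, not in this header). */
static int __init example_legacy_ide_init(void)
{
	if (!ide_probe_legacy())
		return -ENODEV;		/* no (E)ISA bridge found */

	/* ... register the legacy I/O ports and IRQs 14/15 here ... */
	return 0;
}
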
+
+/* MIPS port and memory-mapped I/O string operations. */
+static inline void __ide_flush_prologue(void)
+{
+#ifdef CONFIG_SMP
+ if (cpu_has_dc_aliases)
+ preempt_disable();
+#endif
+}
+
+static inline void __ide_flush_epilogue(void)
+{
+#ifdef CONFIG_SMP
+ if (cpu_has_dc_aliases)
+ preempt_enable();
+#endif
+}
+
+static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size)
+{
+ if (cpu_has_dc_aliases) {
+ unsigned long end = addr + size;
+
+ while (addr < end) {
+ local_flush_data_cache_page((void *)addr);
+ addr += PAGE_SIZE;
+ }
+ }
+}
+
+/*
+ * insw() and friends might be called with interrupts disabled, so we can't
+ * send IPIs for flushing due to the potential for deadlocks; see the comment
+ * above smp_call_function() in arch/mips/kernel/smp.c.  We work around the
+ * problem by disabling preemption, so we know the flush is actually performed
+ * on the processor that holds the lines to be flushed, which is hopefully
+ * even better for performance anyway.
+ */
+static inline void __ide_insw(unsigned long port, void *addr,
+ unsigned int count)
+{
+ __ide_flush_prologue();
+ insw(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_insl(unsigned long port, void *addr, unsigned int count)
+{
+ __ide_flush_prologue();
+ insl(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_outsw(unsigned long port, const void *addr,
+ unsigned long count)
+{
+ __ide_flush_prologue();
+ outsw(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_outsl(unsigned long port, const void *addr,
+ unsigned long count)
+{
+ __ide_flush_prologue();
+ outsl(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
+}
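
The count argument of these wrappers is in transfer units rather than bytes,
so the flushed length is count * 2 for the 16-bit variants and count * 4 for
the 32-bit ones. A hypothetical illustration of the resulting sizes (the
buffer and the use of the classic 0x1f0 primary data port are assumptions):

/* Reading one 512-byte ATA sector as 256 halfwords from the classic
 * primary data port: the wrapper flushes 256 * 2 = 512 bytes of buf. */
u16 buf[256];

__ide_insw(0x1f0, buf, ARRAY_SIZE(buf));
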
+
+static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
+{
+ __ide_flush_prologue();
+ readsw(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
+{
+ __ide_flush_prologue();
+ readsl(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
+{
+ __ide_flush_prologue();
+ writesw(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 2);
+ __ide_flush_epilogue();
+}
+
+static inline void __ide_mm_outsl(void __iomem *port, void *addr, u32 count)
+{
+ __ide_flush_prologue();
+ writesl(port, addr, count);
+ __ide_flush_dcache_range((unsigned long)addr, count * 4);
+ __ide_flush_epilogue();
+}
+
+/* ide_insw calls insw, not __ide_insw. Why? */
+#undef insw
+#undef insl
+#undef outsw
+#undef outsl
+#define insw(port, addr, count) __ide_insw(port, addr, count)
+#define insl(port, addr, count) __ide_insl(port, addr, count)
+#define outsw(port, addr, count) __ide_outsw(port, addr, count)
+#define outsl(port, addr, count) __ide_outsl(port, addr, count)
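
With the #undef/#define block above, any insw()/insl()/outsw()/outsl() call
compiled after this header is transparently routed through the cache-flushing
wrappers. A hypothetical driver fragment, purely to show the preprocessor
effect (data_port, buf and len are made-up names):

/* What the driver writes ...                        */
insw(data_port, buf, len / 2);
/* ... is what the preprocessor turns into:          */
__ide_insw(data_port, buf, len / 2);
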
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_MACH_GENERIC_IDE_H */