author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/mips/sgi-ip27
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/mips/sgi-ip27')
-rw-r--r--	arch/mips/sgi-ip27/Makefile	12
-rw-r--r--	arch/mips/sgi-ip27/TODO	23
-rw-r--r--	arch/mips/sgi-ip27/ip27-berr.c	94
-rw-r--r--	arch/mips/sgi-ip27/ip27-console.c	76
-rw-r--r--	arch/mips/sgi-ip27/ip27-dbgio.c	60
-rw-r--r--	arch/mips/sgi-ip27/ip27-hubio.c	186
-rw-r--r--	arch/mips/sgi-ip27/ip27-init.c	252
-rw-r--r--	arch/mips/sgi-ip27/ip27-irq-glue.S	45
-rw-r--r--	arch/mips/sgi-ip27/ip27-irq.c	457
-rw-r--r--	arch/mips/sgi-ip27/ip27-klconfig.c	135
-rw-r--r--	arch/mips/sgi-ip27/ip27-klnuma.c	135
-rw-r--r--	arch/mips/sgi-ip27/ip27-memory.c	586
-rw-r--r--	arch/mips/sgi-ip27/ip27-nmi.c	249
-rw-r--r--	arch/mips/sgi-ip27/ip27-reset.c	81
-rw-r--r--	arch/mips/sgi-ip27/ip27-smp.c	225
-rw-r--r--	arch/mips/sgi-ip27/ip27-timer.c	243
-rw-r--r--	arch/mips/sgi-ip27/ip27-xtalk.c	135
17 files changed, 2994 insertions(+), 0 deletions(-)
diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile
new file mode 100644
index 000000000000..4ba340780c35
--- /dev/null
+++ b/arch/mips/sgi-ip27/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the IP27 specific kernel interface routines under Linux.
+#
+
+obj-y := ip27-berr.o ip27-console.o ip27-irq.o ip27-init.o ip27-irq-glue.o \
+ ip27-klconfig.o ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o \
+ ip27-timer.o ip27-hubio.o ip27-xtalk.o
+
+obj-$(CONFIG_KGDB) += ip27-dbgio.o
+obj-$(CONFIG_SMP) += ip27-smp.o
+
+EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/sgi-ip27/TODO b/arch/mips/sgi-ip27/TODO
new file mode 100644
index 000000000000..32106131b0d0
--- /dev/null
+++ b/arch/mips/sgi-ip27/TODO
@@ -0,0 +1,23 @@
+1. Need to figure out why PCI writes to the IOC3 hang, and if it is okay
+not to write to the IOC3 ever.
+2. Need to figure out RRB allocation in bridge_startup().
+3. Need to figure out why address swizzling is needed in inw/outw for
+QLogic SCSI controllers.
+4. Need to integrate ip27-klconfig.c:find_lboard and
+ip27-init.c:find_lboard_real. DONE
+5. Is it okay to set calias space on all nodes as 0, instead of 8k as
+in IRIX?
+6. Investigate why things do not work without the setup_test() call
+being invoked on all nodes in ip27-memory.c.
+7. Too many CLIs in the locore handlers:
+For the low level handlers set up by set_except_vector(),
+__tlb_refill_debug_tramp, __xtlb_refill_debug_tramp and cacheerror,
+investigate whether the code should do CLI, STI or KMODE.
+8. Too many do_page_faults invoked - investigate.
+9. start_thread must turn off UX64 ... and define tlb_refill_debug.
+10. Need a bad pmd table, bad pte table. __bad_pmd_table/__bad_pagetable
+does not agree with pgd_bad/pmd_bad.
+11. All intrs (ip27_do_irq handlers) are targeted at cpu A on the node.
+This might need to change later. Only the timer intr is set up to be
+received on both CPU A and B. (ip27_do_irq()/bridge_startup())
+13. Cache flushing (especially the SMP version) has to be investigated.
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
new file mode 100644
index 000000000000..e1829a5d3b19
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -0,0 +1,94 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 by Silicon Graphics
+ * Copyright (C) 2002 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/module.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/sn0/hub.h>
+#include <asm/tlbdebug.h>
+#include <asm/traps.h>
+#include <asm/uaccess.h>
+
+extern void dump_tlb_addr(unsigned long addr);
+extern void dump_tlb_all(void);
+
+static void dump_hub_information(unsigned long errst0, unsigned long errst1)
+{
+ static char *err_type[2][8] = {
+ { NULL, "Uncached Partial Read PRERR", "DERR", "Read Timeout",
+ NULL, NULL, NULL, NULL },
+ { "WERR", "Uncached Partial Write", "PWERR", "Write Timeout",
+ NULL, NULL, NULL, NULL }
+ };
+ int wrb = errst1 & PI_ERR_ST1_WRBRRB_MASK;
+
+ if (!(errst0 & PI_ERR_ST0_VALID_MASK)) {
+ printk("Hub does not contain valid error information\n");
+ return;
+ }
+
+
+ printk("Hub has valid error information:\n");
+ if (errst0 & PI_ERR_ST0_OVERRUN_MASK)
+ printk("Overrun is set. Error stack may contain additional "
+ "information.\n");
+ printk("Hub error address is %08lx\n",
+ (errst0 & PI_ERR_ST0_ADDR_MASK) >> (PI_ERR_ST0_ADDR_SHFT - 3));
+ printk("Incoming message command 0x%lx\n",
+ (errst0 & PI_ERR_ST0_CMD_MASK) >> PI_ERR_ST0_CMD_SHFT);
+ printk("Supplemental field of incoming message is 0x%lx\n",
+ (errst0 & PI_ERR_ST0_SUPPL_MASK) >> PI_ERR_ST0_SUPPL_SHFT);
+ printk("T5 Rn (for RRB only) is 0x%lx\n",
+ (errst0 & PI_ERR_ST0_REQNUM_MASK) >> PI_ERR_ST0_REQNUM_SHFT);
+ printk("Error type is %s\n", err_type[wrb]
+ [(errst0 & PI_ERR_ST0_TYPE_MASK) >> PI_ERR_ST0_TYPE_SHFT]
+ ? : "invalid");
+}
+
+int ip27_be_handler(struct pt_regs *regs, int is_fixup)
+{
+ unsigned long errst0, errst1;
+ int data = regs->cp0_cause & 4;
+ int cpu = LOCAL_HUB_L(PI_CPU_NUM);
+
+ if (is_fixup)
+ return MIPS_BE_FIXUP;
+
+ printk("Slice %c got %cbe at 0x%lx\n", 'A' + cpu, data ? 'd' : 'i',
+ regs->cp0_epc);
+ printk("Hub information:\n");
+ printk("ERR_INT_PEND = 0x%06lx\n", LOCAL_HUB_L(PI_ERR_INT_PEND));
+ errst0 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS0_B : PI_ERR_STATUS0_A);
+ errst1 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS1_B : PI_ERR_STATUS1_A);
+ dump_hub_information(errst0, errst1);
+ show_regs(regs);
+ dump_tlb_all();
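+ /*
+ * Hang here so the register and TLB dump stays on the console; the
+ * force_sig() below is never reached.
+ */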
+ while(1);
+ force_sig(SIGBUS, current);
+}
+
+void __init ip27_be_init(void)
+{
+ /* XXX Initialize all the Hub & Bridge error handling here. */
+ int cpu = LOCAL_HUB_L(PI_CPU_NUM);
+ int cpuoff = cpu << 8;
+
+ board_be_handler = ip27_be_handler;
+
+ LOCAL_HUB_S(PI_ERR_INT_PEND,
+ cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
+ LOCAL_HUB_S(PI_ERR_INT_MASK_A + cpuoff, 0);
+ LOCAL_HUB_S(PI_ERR_STACK_ADDR_A + cpuoff, 0);
+ LOCAL_HUB_S(PI_ERR_STACK_SIZE, 0); /* Disable error stack */
+ LOCAL_HUB_S(PI_SYSAD_ERRCHK_EN, PI_SYSAD_CHECK_ALL);
+}
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
new file mode 100644
index 000000000000..d97f5b5ef844
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -0,0 +1,76 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 2002 Ralf Baechle
+ */
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/termios.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+
+#include <asm/page.h>
+#include <asm/semaphore.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn0/hub.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/ioc3.h>
+#include <asm/sn/sn_private.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#define IOC3_CLK (22000000 / 3)
+#define IOC3_FLAGS (0)
+
+static inline struct ioc3_uartregs *console_uart(void)
+{
+ struct ioc3 *ioc3;
+
+ ioc3 = (struct ioc3 *)KL_CONFIG_CH_CONS_INFO(get_nasid())->memory_base;
+
+ return &ioc3->sregs.uarta;
+}
+
+void prom_putchar(char c)
+{
+ struct ioc3_uartregs *uart = console_uart();
+
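+ /* 0x20 is the 16550 LSR THRE bit: wait for the transmit holding register to empty. */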
+ while ((uart->iu_lsr & 0x20) == 0);
+ uart->iu_thr = c;
+}
+
+char __init prom_getchar(void)
+{
+ return 0;
+}
+
+static inline void ioc3_console_probe(void)
+{
+ struct uart_port up;
+
+ /*
+ * Register to interrupt zero because we share the interrupt with
+ * the serial driver which we don't properly support yet.
+ */
+ memset(&up, 0, sizeof(up));
+ up.membase = (unsigned char *) console_uart();
+ up.irq = 0;
+ up.uartclk = IOC3_CLK;
+ up.regshift = 0;
+ up.iotype = UPIO_MEM;
+ up.flags = IOC3_FLAGS;
+ up.line = 0;
+
+ if (early_serial_setup(&up))
+ printk(KERN_ERR "Early serial init of port 0 failed\n");
+}
+
+__init void ip27_setup_console(void)
+{
+ ioc3_console_probe();
+}
diff --git a/arch/mips/sgi-ip27/ip27-dbgio.c b/arch/mips/sgi-ip27/ip27-dbgio.c
new file mode 100644
index 000000000000..08fd88b36f80
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-dbgio.c
@@ -0,0 +1,60 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Copyright 2004 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn0/hub.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/ioc3.h>
+#include <asm/sn/sn_private.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+
+#define IOC3_CLK (22000000 / 3)
+#define IOC3_FLAGS (0)
+
+static inline struct ioc3_uartregs *console_uart(void)
+{
+ struct ioc3 *ioc3;
+
+ ioc3 = (struct ioc3 *)KL_CONFIG_CH_CONS_INFO(get_nasid())->memory_base;
+
+ return &ioc3->sregs.uarta;
+}
+
+unsigned char getDebugChar(void)
+{
+ struct ioc3_uartregs *uart = console_uart();
+
+ while ((uart->iu_lsr & UART_LSR_DR) == 0);
+ return uart->iu_rbr;
+}
+
+void putDebugChar(unsigned char c)
+{
+ struct ioc3_uartregs *uart = console_uart();
+
+ while ((uart->iu_lsr & UART_LSR_THRE) == 0);
+ uart->iu_thr = c;
+}
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
new file mode 100644
index 000000000000..524b371f9397
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Christoph Hellwig.
+ * Released under GPL v2.
+ *
+ * Support functions for the HUB ASIC - mostly PIO mapping related.
+ */
+
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/mmzone.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/hub.h>
+
+
+static int force_fire_and_forget = 1;
+
+/**
+ * hub_pio_map - establish a HUB PIO mapping
+ *
+ * @hub: hub to perform PIO mapping on
+ * @widget: widget ID to perform PIO mapping for
+ * @xtalk_addr: xtalk_address that needs to be mapped
+ * @size: size of the PIO mapping
+ *
+ **/
+unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
+ unsigned long xtalk_addr, size_t size)
+{
+ nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
+ volatile hubreg_t junk;
+ unsigned i;
+
+ /* use small-window mapping if possible */
+ if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
+ return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);
+
+ if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
+ printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
+ " too big (%ld)\n",
+ nasid, widget, xtalk_addr, size);
+ return 0;
+ }
+
+ xtalk_addr &= ~(BWIN_SIZE-1);
+ for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
+ if (test_and_set_bit(i, hub_data(cnode)->h_bigwin_used))
+ continue;
+
+ /*
+ * The code below does a PIO write to setup an ITTE entry.
+ *
+ * We need to prevent other CPUs from seeing our updated
+ * memory shadow of the ITTE (in the piomap) until the ITTE
+ * entry is actually set up; otherwise, another CPU might
+ * attempt a PIO prematurely.
+ *
+ * Also, the only way we can know that an entry has been
+ * received by the hub and can be used by future PIO reads/
+ * writes is by reading back the ITTE entry after writing it.
+ *
+ * For these two reasons, we PIO read back the ITTE entry
+ * after we write it.
+ */
+ IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
+ junk = HUB_L(IIO_ITTE_GET(nasid, i));
+
+ return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
+ }
+
+ printk(KERN_WARNING "unable to establish PIO mapping for at"
+ " hub %d widget %d addr 0x%lx\n",
+ nasid, widget, xtalk_addr);
+ return 0;
+}
+
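+/*
+ * Usage sketch, assuming a caller that already knows its cnode, widget
+ * and a register offset "reg_offset" within that widget:
+ *
+ *	unsigned long base = hub_pio_map(cnode, widget, 0, PAGE_SIZE);
+ *	if (base)
+ *		val = *(volatile u32 *)(base + reg_offset);
+ *
+ * A return value of 0 means no PIO window could be set up.
+ */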
+
+/*
+ * hub_setup_prb(nasid, prbnum, credits, conveyor)
+ *
+ * Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
+ * put it into conveyor belt mode with the specified number of credits.
+ */
+static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
+{
+ iprb_t prb;
+ int prb_offset;
+
+ /*
+ * Get the current register value.
+ */
+ prb_offset = IIO_IOPRB(prbnum);
+ prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
+
+ /*
+ * Clear out some fields.
+ */
+ prb.iprb_ovflow = 1;
+ prb.iprb_bnakctr = 0;
+ prb.iprb_anakctr = 0;
+
+ /*
+ * Enable or disable fire-and-forget mode.
+ */
+ prb.iprb_ff = force_fire_and_forget ? 1 : 0;
+
+ /*
+ * Set the appropriate number of PIO credits for the widget.
+ */
+ prb.iprb_xtalkctr = credits;
+
+ /*
+ * Store the new value to the register.
+ */
+ REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
+}
+
+/**
+ * hub_set_piomode - set pio mode for a given hub
+ *
+ * @nasid: physical node ID for the hub in question
+ *
+ * Put the hub into either "PIO conveyor belt" mode or "fire-and-forget" mode.
+ * To do this, we have to make absolutely sure that no PIOs are in progress
+ * so we turn off access to all widgets for the duration of the function.
+ *
+ * XXX - This code should really check what kind of widget we're talking
+ * to. Bridges can only handle three requests, but XG will do more.
+ * How many can crossbow handle to widget 0? We're assuming 1.
+ *
+ * XXX - There is a bug in the crossbow that link reset PIOs do not
+ * return write responses. The easiest solution to this problem is to
+ * leave widget 0 (xbow) in fire-and-forget mode at all times. This
+ * only affects pio's to xbow registers, which should be rare.
+ **/
+static void hub_set_piomode(nasid_t nasid)
+{
+ hubreg_t ii_iowa;
+ hubii_wcr_t ii_wcr;
+ unsigned i;
+
+ ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
+ REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
+
+ ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
+
+ if (ii_wcr.iwcr_dir_con) {
+ /*
+ * Assume a bridge here.
+ */
+ hub_setup_prb(nasid, 0, 3);
+ } else {
+ /*
+ * Assume a crossbow here.
+ */
+ hub_setup_prb(nasid, 0, 1);
+ }
+
+ /*
+ * XXX - Here's where we should take the widget type into
+ * account when assigning credits.
+ */
+ for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++)
+ hub_setup_prb(nasid, i, 3);
+
+ REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
+}
+
+/*
+ * hub_pio_init - PIO-related hub initialization
+ *
+ * @hub: hubinfo structure for our hub
+ */
+void hub_pio_init(cnodeid_t cnode)
+{
+ nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
+ unsigned i;
+
+ /* initialize big window piomaps for this hub */
+ bitmap_zero(hub_data(cnode)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
+ for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
+ IIO_ITTE_DISABLE(nasid, i);
+
+ hub_set_piomode(nasid);
+}
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
new file mode 100644
index 000000000000..6dcee5c46c74
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -0,0 +1,252 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
+ * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/cpumask.h>
+#include <asm/cpu.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/time.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sn0/addrs.h>
+#include <asm/sn/sn0/hubni.h>
+#include <asm/sn/sn0/hubio.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/ioc3.h>
+#include <asm/mipsregs.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/hub.h>
+#include <asm/sn/intr.h>
+#include <asm/current.h>
+#include <asm/smp.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/sn/launch.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/sn0/ip27.h>
+#include <asm/sn/mapped_kernel.h>
+
+#define CPU_NONE (cpuid_t)-1
+
+static DECLARE_BITMAP(hub_init_mask, MAX_COMPACT_NODES);
+nasid_t master_nasid = INVALID_NASID;
+
+cnodeid_t nasid_to_compact_node[MAX_NASIDS];
+nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
+cnodeid_t cpuid_to_compact_node[MAXCPUS];
+
+EXPORT_SYMBOL(nasid_to_compact_node);
+
+extern void pcibr_setup(cnodeid_t);
+
+extern void xtalk_probe_node(cnodeid_t nid);
+
+static void __init per_hub_init(cnodeid_t cnode)
+{
+ struct hub_data *hub = hub_data(cnode);
+ nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+ cpu_set(smp_processor_id(), hub->h_cpus);
+
+ if (test_and_set_bit(cnode, hub_init_mask))
+ return;
+
+ /*
+ * Set CRB timeout at 5ms, (< PI timeout of 10ms)
+ */
+ REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
+ REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
+
+ hub_rtc_init(cnode);
+ xtalk_probe_node(cnode);
+
+#ifdef CONFIG_REPLICATE_EXHANDLERS
+ /*
+ * If this is not a headless node initialization,
+ * copy over the caliased exception handlers.
+ */
+ if (get_compact_nodeid() == cnode) {
+ extern char except_vec2_generic, except_vec3_generic;
+ extern void build_tlb_refill_handler(void);
+
+ memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
+ memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
+ build_tlb_refill_handler();
+ memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
+ memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
+ __flush_cache_all();
+ }
+#endif
+}
+
+void __init per_cpu_init(void)
+{
+ int cpu = smp_processor_id();
+ int slice = LOCAL_HUB_L(PI_CPU_NUM);
+ cnodeid_t cnode = get_compact_nodeid();
+ struct hub_data *hub = hub_data(cnode);
+ struct slice_data *si = hub->slice + slice;
+ int i;
+
+ if (test_and_set_bit(slice, &hub->slice_map))
+ return;
+
+ clear_c0_status(ST0_IM);
+
+ for (i = 0; i < LEVELS_PER_SLICE; i++)
+ si->level_to_irq[i] = -1;
+
+ /*
+ * Some interrupts are reserved by hardware or by software convention.
+ * Mark these as reserved right away so they won't be used accidentally
+ * later.
+ */
+ for (i = 0; i <= BASE_PCI_IRQ; i++) {
+ __set_bit(i, si->irq_alloc_mask);
+ LOCAL_HUB_S(PI_INT_PEND_MOD, i);
+ }
+
+ __set_bit(IP_PEND0_6_63, si->irq_alloc_mask);
+ LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);
+
+ for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
+ __set_bit(i, si->irq_alloc_mask + 1);
+ LOCAL_HUB_S(PI_INT_PEND_MOD, i);
+ }
+
+ LOCAL_HUB_L(PI_INT_PEND0);
+
+ /*
+ * We use this so we can find the local hub's data as fast as
+ * possible.
+ */
+ cpu_data[cpu].data = si;
+
+ cpu_time_init();
+ install_ipi();
+
+ /* Install our NMI handler if symmon hasn't installed one. */
+ install_cpu_nmi_handler(cputoslice(cpu));
+
+ set_c0_status(SRB_DEV0 | SRB_DEV1);
+
+ per_hub_init(cnode);
+}
+
+/*
+ * get_nasid() returns the physical node id number of the caller.
+ */
+nasid_t
+get_nasid(void)
+{
+ return (nasid_t)((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_NODEID_MASK)
+ >> NSRI_NODEID_SHFT);
+}
+
+/*
+ * Map the physical node id to a virtual node id (virtual node ids are contiguous).
+ */
+cnodeid_t get_compact_nodeid(void)
+{
+ return NASID_TO_COMPACT_NODEID(get_nasid());
+}
+
+/* Extracted from the IOC3 meta driver. FIXME. */
+static inline void ioc3_sio_init(void)
+{
+ struct ioc3 *ioc3;
+ nasid_t nid;
+ long loops;
+
+ nid = get_nasid();
+ ioc3 = (struct ioc3 *) KL_CONFIG_CH_CONS_INFO(nid)->memory_base;
+
+ ioc3->sscr_a = 0; /* PIO mode for uarta. */
+ ioc3->sscr_b = 0; /* PIO mode for uartb. */
+ ioc3->sio_iec = ~0;
+ ioc3->sio_ies = (SIO_IR_SA_INT | SIO_IR_SB_INT);
+
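+ /* Crude busy-wait delays around resetting the UART FIFOs. */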
+ loops = 1000000; while (loops--);
+ ioc3->sregs.uarta.iu_fcr = 0;
+ ioc3->sregs.uartb.iu_fcr = 0;
+ loops = 1000000; while (loops--);
+}
+
+static inline void ioc3_eth_init(void)
+{
+ struct ioc3 *ioc3;
+ nasid_t nid;
+
+ nid = get_nasid();
+ ioc3 = (struct ioc3 *) KL_CONFIG_CH_CONS_INFO(nid)->memory_base;
+
+ ioc3->eier = 0;
+}
+
+extern void ip27_setup_console(void);
+extern void ip27_time_init(void);
+extern void ip27_reboot_setup(void);
+
+static int __init ip27_setup(void)
+{
+ hubreg_t p, e, n_mode;
+ nasid_t nid;
+
+ ip27_setup_console();
+ ip27_reboot_setup();
+
+ /*
+ * hub_rtc init and cpu clock intr enabled for later calibrate_delay.
+ */
+ nid = get_nasid();
+ printk("IP27: Running on node %d.\n", nid);
+
+ p = LOCAL_HUB_L(PI_CPU_PRESENT_A) & 1;
+ e = LOCAL_HUB_L(PI_CPU_ENABLE_A) & 1;
+ printk("Node %d has %s primary CPU%s.\n", nid,
+ p ? "a" : "no",
+ e ? ", CPU is running" : "");
+
+ p = LOCAL_HUB_L(PI_CPU_PRESENT_B) & 1;
+ e = LOCAL_HUB_L(PI_CPU_ENABLE_B) & 1;
+ printk("Node %d has %s secondary CPU%s.\n", nid,
+ p ? "a" : "no",
+ e ? ", CPU is running" : "");
+
+ /*
+ * Try to catch kernel misconfigurations and give the user an
+ * indication of which option to select.
+ */
+ n_mode = LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_MORENODES_MASK;
+ printk("Machine is in %c mode.\n", n_mode ? 'N' : 'M');
+#ifdef CONFIG_SGI_SN0_N_MODE
+ if (!n_mode)
+ panic("Kernel compiled for M mode.");
+#else
+ if (n_mode)
+ panic("Kernel compiled for N mode.");
+#endif
+
+ ioc3_sio_init();
+ ioc3_eth_init();
+ per_cpu_init();
+
+ set_io_port_base(IO_BASE);
+
+ board_time_init = ip27_time_init;
+
+ return 0;
+}
+
+early_initcall(ip27_setup);
diff --git a/arch/mips/sgi-ip27/ip27-irq-glue.S b/arch/mips/sgi-ip27/ip27-irq-glue.S
new file mode 100644
index 000000000000..c304df715e0a
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-irq-glue.S
@@ -0,0 +1,45 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .text
+ .align 5
+NESTED(ip27_irq, PT_SIZE, sp)
+ SAVE_ALL
+ CLI
+
+ mfc0 s0, CP0_CAUSE
+ mfc0 t0, CP0_STATUS
+ and s0, t0
+ move a0, sp
+ PTR_LA ra, ret_from_irq
+
+ /* First check for RT interrupt. */
+ andi t0, s0, CAUSEF_IP4
+ bnez t0, ip4
+ andi t0, s0, CAUSEF_IP2
+ bnez t0, ip2
+ andi t0, s0, CAUSEF_IP3
+ bnez t0, ip3
+ andi t0, s0, CAUSEF_IP5
+ bnez t0, ip5
+ andi t0, s0, CAUSEF_IP6
+ bnez t0, ip6
+ j ra
+
+ip2: j ip27_do_irq_mask0 # PI_INT_PEND_0 or CC_PEND_{A|B}
+ip3: j ip27_do_irq_mask1 # PI_INT_PEND_1
+ip4: j ip27_rt_timer_interrupt
+ip5: j ip27_prof_timer
+ip6: j ip27_hub_error
+
+ END(ip27_irq)
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
new file mode 100644
index 000000000000..61817a18aed2
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -0,0 +1,457 @@
+/*
+ * ip27-irq.c: High-level interrupt handling for IP27 architecture.
+ *
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999 - 2001 Kanoj Sarcar
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/timex.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel_stat.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/bootinfo.h>
+#include <asm/io.h>
+#include <asm/mipsregs.h>
+#include <asm/system.h>
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/pci/bridge.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/hub.h>
+#include <asm/sn/intr.h>
+
+#undef DEBUG_IRQ
+#ifdef DEBUG_IRQ
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+/*
+ * Linux has a controller-independent x86 interrupt architecture.
+ * Every controller has a 'controller-template' that is used
+ * by the main code to do the right thing. Each driver-visible
+ * interrupt source is transparently wired to the appropriate
+ * controller. Thus drivers need not be aware of the
+ * interrupt-controller.
+ *
+ * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
+ * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
+ * (IO-APICs assumed to be messaging to Pentium local-APICs)
+ *
+ * the code is designed to be easily extended with new/different
+ * interrupt controllers, without having to do assembly magic.
+ */
+
+extern asmlinkage void ip27_irq(void);
+
+extern struct bridge_controller *irq_to_bridge[];
+extern int irq_to_slot[];
+
+/*
+ * use these macros to get the encoded nasid and widget id
+ * from the irq value
+ */
+#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)]
+#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i]
+
+static inline int alloc_level(int cpu, int irq)
+{
+ struct slice_data *si = cpu_data[cpu].data;
+ int level; /* pre-allocated entries */
+
+ level = find_first_zero_bit(si->irq_alloc_mask, LEVELS_PER_SLICE);
+ if (level >= LEVELS_PER_SLICE)
+ panic("Cpu %d flooded with devices\n", cpu);
+
+ __set_bit(level, si->irq_alloc_mask);
+ si->level_to_irq[level] = irq;
+
+ return level;
+}
+
+static inline int find_level(cpuid_t *cpunum, int irq)
+{
+ int cpu, i;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ struct slice_data *si = cpu_data[cpu].data;
+
+ if (!cpu_online(cpu))
+ continue;
+
+ for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
+ if (si->level_to_irq[i] == irq) {
+ *cpunum = cpu;
+
+ return i;
+ }
+ }
+
+ panic("Could not identify cpu/level for irq %d\n", irq);
+}
+
+/*
+ * Find the most significant bit set (returns its bit index)
+ */
+static int ms1bit(unsigned long x)
+{
+ int b = 0, s;
+
+ s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
+ s = 8; if (x >> 8 == 0) s = 0; b += s; x >>= s;
+ s = 4; if (x >> 4 == 0) s = 0; b += s; x >>= s;
+ s = 2; if (x >> 2 == 0) s = 0; b += s; x >>= s;
+ s = 1; if (x >> 1 == 0) s = 0; b += s;
+
+ return b;
+}
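+
+/*
+ * Example: ms1bit(0x28) == 5, so the dispatch loops below service the
+ * highest pending interrupt level first.
+ */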
+
+/*
+ * This code is unnecessarily complex, because we do SA_INTERRUPT
+ * intr enabling. Basically, once we grab the set of intrs we need
+ * to service, we must mask _all_ these interrupts; firstly, to make
+ * sure the same intr does not intr again, causing recursion that
+ * can lead to stack overflow. Secondly, we can not just mask the
+ * one intr we are do_IRQing, because the non-masked intrs in the
+ * first set might intr again, causing multiple servicings of the
+ * same intr. This effect is mostly seen for intercpu intrs.
+ * Kanoj 05.13.00
+ */
+
+void ip27_do_irq_mask0(struct pt_regs *regs)
+{
+ int irq, swlevel;
+ hubreg_t pend0, mask0;
+ cpuid_t cpu = smp_processor_id();
+ int pi_int_mask0 =
+ (cputoslice(cpu) == 0) ? PI_INT_MASK0_A : PI_INT_MASK0_B;
+
+ /* copied from Irix intpend0() */
+ pend0 = LOCAL_HUB_L(PI_INT_PEND0);
+ mask0 = LOCAL_HUB_L(pi_int_mask0);
+
+ pend0 &= mask0; /* Pick intrs we should look at */
+ if (!pend0)
+ return;
+
+ swlevel = ms1bit(pend0);
+#ifdef CONFIG_SMP
+ if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
+ } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
+ } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
+ smp_call_function_interrupt();
+ } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
+ LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
+ smp_call_function_interrupt();
+ } else
+#endif
+ {
+ /* "map" swlevel to irq */
+ struct slice_data *si = cpu_data[cpu].data;
+
+ irq = si->level_to_irq[swlevel];
+ do_IRQ(irq, regs);
+ }
+
+ LOCAL_HUB_L(PI_INT_PEND0);
+}
+
+void ip27_do_irq_mask1(struct pt_regs *regs)
+{
+ int irq, swlevel;
+ hubreg_t pend1, mask1;
+ cpuid_t cpu = smp_processor_id();
+ int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B;
+ struct slice_data *si = cpu_data[cpu].data;
+
+ /* copied from Irix intpend0() */
+ pend1 = LOCAL_HUB_L(PI_INT_PEND1);
+ mask1 = LOCAL_HUB_L(pi_int_mask1);
+
+ pend1 &= mask1; /* Pick intrs we should look at */
+ if (!pend1)
+ return;
+
+ swlevel = ms1bit(pend1);
+ /* "map" swlevel to irq */
+ irq = si->level_to_irq[swlevel];
+ LOCAL_HUB_CLR_INTR(swlevel);
+ do_IRQ(irq, regs);
+
+ LOCAL_HUB_L(PI_INT_PEND1);
+}
+
+void ip27_prof_timer(struct pt_regs *regs)
+{
+ panic("CPU %d got a profiling interrupt", smp_processor_id());
+}
+
+void ip27_hub_error(struct pt_regs *regs)
+{
+ panic("CPU %d got a hub error interrupt", smp_processor_id());
+}
+
+static int intr_connect_level(int cpu, int bit)
+{
+ nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ struct slice_data *si = cpu_data[cpu].data;
+
+ __set_bit(bit, si->irq_enable_mask);
+
+ if (!cputoslice(cpu)) {
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
+ } else {
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
+ }
+
+ return 0;
+}
+
+static int intr_disconnect_level(int cpu, int bit)
+{
+ nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ struct slice_data *si = cpu_data[cpu].data;
+
+ __clear_bit(bit, si->irq_enable_mask);
+
+ if (!cputoslice(cpu)) {
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
+ } else {
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
+ }
+
+ return 0;
+}
+
+/* Startup one of the (PCI ...) IRQs routes over a bridge. */
+static unsigned int startup_bridge_irq(unsigned int irq)
+{
+ struct bridge_controller *bc;
+ bridgereg_t device;
+ bridge_t *bridge;
+ int pin, swlevel;
+ cpuid_t cpu;
+
+ pin = SLOT_FROM_PCI_IRQ(irq);
+ bc = IRQ_TO_BRIDGE(irq);
+ bridge = bc->base;
+
+ DBG("bridge_startup(): irq= 0x%x pin=%d\n", irq, pin);
+ /*
+ * "map" irq to a swlevel greater than 6 since the first 6 bits
+ * of INT_PEND0 are taken
+ */
+ swlevel = find_level(&cpu, irq);
+ bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
+ bridge->b_int_enable |= (1 << pin);
+ bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */
+
+ /*
+ * Enable sending of an interrupt clear packet to the hub on a high to
+ * low transition of the interrupt pin.
+ *
+ * IRIX sets additional bits in the address which are documented as
+ * reserved in the bridge docs.
+ */
+ bridge->b_int_mode |= (1UL << pin);
+
+ /*
+ * We assume the bridge to have a 1:1 mapping between devices
+ * (slots) and intr pins.
+ */
+ device = bridge->b_int_device;
+ device &= ~(7 << (pin*3));
+ device |= (pin << (pin*3));
+ bridge->b_int_device = device;
+
+ bridge->b_wid_tflush;
+
+ return 0; /* Never anything pending. */
+}
+
+/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */
+static void shutdown_bridge_irq(unsigned int irq)
+{
+ struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
+ bridge_t *bridge = bc->base;
+ struct slice_data *si = cpu_data[bc->irq_cpu].data;
+ int pin, swlevel;
+ cpuid_t cpu;
+
+ DBG("bridge_shutdown: irq 0x%x\n", irq);
+ pin = SLOT_FROM_PCI_IRQ(irq);
+
+ /*
+ * map irq to a swlevel greater than 6 since the first 6 bits
+ * of INT_PEND0 are taken
+ */
+ swlevel = find_level(&cpu, irq);
+ intr_disconnect_level(cpu, swlevel);
+
+ __clear_bit(swlevel, si->irq_alloc_mask);
+ si->level_to_irq[swlevel] = -1;
+
+ bridge->b_int_enable &= ~(1 << pin);
+ bridge->b_wid_tflush;
+}
+
+static inline void enable_bridge_irq(unsigned int irq)
+{
+ cpuid_t cpu;
+ int swlevel;
+
+ swlevel = find_level(&cpu, irq); /* Criminal offence */
+ intr_connect_level(cpu, swlevel);
+}
+
+static inline void disable_bridge_irq(unsigned int irq)
+{
+ cpuid_t cpu;
+ int swlevel;
+
+ swlevel = find_level(&cpu, irq); /* Criminal offence */
+ intr_disconnect_level(cpu, swlevel);
+}
+
+static void mask_and_ack_bridge_irq(unsigned int irq)
+{
+ disable_bridge_irq(irq);
+}
+
+static void end_bridge_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
+ irq_desc[irq].action)
+ enable_bridge_irq(irq);
+}
+
+static struct hw_interrupt_type bridge_irq_type = {
+ .typename = "bridge",
+ .startup = startup_bridge_irq,
+ .shutdown = shutdown_bridge_irq,
+ .enable = enable_bridge_irq,
+ .disable = disable_bridge_irq,
+ .ack = mask_and_ack_bridge_irq,
+ .end = end_bridge_irq,
+};
+
+static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
+
+static int allocate_irqno(void)
+{
+ int irq;
+
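+ /*
+ * find_first_zero_bit() and test_and_set_bit() are not atomic as a
+ * pair, so retry if another CPU claimed the bit in between.
+ */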
+again:
+ irq = find_first_zero_bit(irq_map, NR_IRQS);
+
+ if (irq >= NR_IRQS)
+ return -ENOSPC;
+
+ if (test_and_set_bit(irq, irq_map))
+ goto again;
+
+ return irq;
+}
+
+void free_irqno(unsigned int irq)
+{
+ clear_bit(irq, irq_map);
+}
+
+void __devinit register_bridge_irq(unsigned int irq)
+{
+ irq_desc[irq].status = IRQ_DISABLED;
+ irq_desc[irq].action = 0;
+ irq_desc[irq].depth = 1;
+ irq_desc[irq].handler = &bridge_irq_type;
+}
+
+int __devinit request_bridge_irq(struct bridge_controller *bc)
+{
+ int irq = allocate_irqno();
+ int swlevel, cpu;
+ nasid_t nasid;
+
+ if (irq < 0)
+ return irq;
+
+ /*
+ * "map" irq to a swlevel greater than 6 since the first 6 bits
+ * of INT_PEND0 are taken
+ */
+ cpu = bc->irq_cpu;
+ swlevel = alloc_level(cpu, irq);
+ if (unlikely(swlevel < 0)) {
+ free_irqno(irq);
+
+ return -EAGAIN;
+ }
+
+ /* Make sure it's not already pending when we connect it. */
+ nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ REMOTE_HUB_CLR_INTR(nasid, swlevel);
+
+ intr_connect_level(cpu, swlevel);
+
+ register_bridge_irq(irq);
+
+ return irq;
+}
+
+void __init arch_init_irq(void)
+{
+ set_except_vector(0, ip27_irq);
+}
+
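+/*
+ * Enable the per-slice reschedule and call-function IPI sources in the
+ * local hub's PI_INT_MASK0 register and record them in the slice's
+ * allocation and enable masks.
+ */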
+void install_ipi(void)
+{
+ int slice = LOCAL_HUB_L(PI_CPU_NUM);
+ int cpu = smp_processor_id();
+ struct slice_data *si = cpu_data[cpu].data;
+ hubreg_t mask, set;
+
+ if (slice == 0) {
+ LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
+ LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
+ mask = LOCAL_HUB_L(PI_INT_MASK0_A); /* Slice A */
+ set = (1UL << CPU_RESCHED_A_IRQ) | (1UL << CPU_CALL_A_IRQ);
+ mask |= set;
+ si->irq_enable_mask[0] |= set;
+ si->irq_alloc_mask[0] |= set;
+ LOCAL_HUB_S(PI_INT_MASK0_A, mask);
+ } else {
+ LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
+ LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
+ mask = LOCAL_HUB_L(PI_INT_MASK0_B); /* Slice B */
+ set = (1UL << CPU_RESCHED_B_IRQ) | (1UL << CPU_CALL_B_IRQ);
+ mask |= set;
+ si->irq_enable_mask[1] |= set;
+ si->irq_alloc_mask[1] |= set;
+ LOCAL_HUB_S(PI_INT_MASK0_B, mask);
+ }
+}
diff --git a/arch/mips/sgi-ip27/ip27-klconfig.c b/arch/mips/sgi-ip27/ip27-klconfig.c
new file mode 100644
index 000000000000..dd830b3670d1
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-klconfig.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/param.h>
+#include <linux/timex.h>
+#include <linux/mm.h>
+
+#include <asm/sn/klconfig.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+
+klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
+{
+ int index, j;
+
+ if (kli == (klinfo_t *)NULL) {
+ index = 0;
+ } else {
+ for (j = 0; j < KLCF_NUM_COMPS(brd); j++)
+ if (kli == KLCF_COMP(brd, j))
+ break;
+ index = j;
+ if (index == KLCF_NUM_COMPS(brd)) {
+ printk("find_component: Bad pointer: 0x%p\n", kli);
+ return (klinfo_t *)NULL;
+ }
+ index++; /* next component */
+ }
+
+ for (; index < KLCF_NUM_COMPS(brd); index++) {
+ kli = KLCF_COMP(brd, index);
+ if (KLCF_COMP_TYPE(kli) == struct_type)
+ return kli;
+ }
+
+ /* Didn't find it. */
+ return (klinfo_t *)NULL;
+}
+
+klinfo_t *find_first_component(lboard_t *brd, unsigned char struct_type)
+{
+ return find_component(brd, (klinfo_t *)NULL, struct_type);
+}
+
+lboard_t * find_lboard(lboard_t *start, unsigned char brd_type)
+{
+ /* Search all boards stored on this node. */
+ while (start) {
+ if (start->brd_type == brd_type)
+ return start;
+ start = KLCF_NEXT(start);
+ }
+ /* Didn't find it. */
+ return (lboard_t *)NULL;
+}
+
+lboard_t * find_lboard_class(lboard_t *start, unsigned char brd_type)
+{
+ /* Search all boards stored on this node. */
+ while (start) {
+ if (KLCLASS(start->brd_type) == KLCLASS(brd_type))
+ return start;
+ start = KLCF_NEXT(start);
+ }
+
+ /* Didn't find it. */
+ return (lboard_t *)NULL;
+}
+
+cnodeid_t get_cpu_cnode(cpuid_t cpu)
+{
+ return CPUID_TO_COMPACT_NODEID(cpu);
+}
+
+klcpu_t * nasid_slice_to_cpuinfo(nasid_t nasid, int slice)
+{
+ lboard_t *brd;
+ klcpu_t *acpu;
+
+ if (!(brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27)))
+ return (klcpu_t *)NULL;
+
+ if (!(acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU)))
+ return (klcpu_t *)NULL;
+
+ do {
+ if ((acpu->cpu_info.physid) == slice)
+ return acpu;
+ } while ((acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
+ KLSTRUCT_CPU)));
+ return (klcpu_t *)NULL;
+}
+
+klcpu_t * sn_get_cpuinfo(cpuid_t cpu)
+{
+ nasid_t nasid;
+ int slice;
+ klcpu_t *acpu;
+ gda_t *gdap = GDA;
+ cnodeid_t cnode;
+
+ if (!(cpu < MAXCPUS)) {
+ printk("sn_get_cpuinfo: illegal cpuid 0x%lx\n", cpu);
+ return NULL;
+ }
+
+ cnode = get_cpu_cnode(cpu);
+ if (cnode == INVALID_CNODEID)
+ return NULL;
+
+ if ((nasid = gdap->g_nasidtable[cnode]) == INVALID_NASID)
+ return NULL;
+
+ for (slice = 0; slice < CPUS_PER_NODE; slice++) {
+ acpu = nasid_slice_to_cpuinfo(nasid, slice);
+ if (acpu && acpu->cpu_info.virtid == cpu)
+ return acpu;
+ }
+ return NULL;
+}
+
+int get_cpu_slice(cpuid_t cpu)
+{
+ klcpu_t *acpu;
+
+ if ((acpu = sn_get_cpuinfo(cpu)) == NULL)
+ return -1;
+ return acpu->cpu_info.physid;
+}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
new file mode 100644
index 000000000000..41c3f405e00c
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -0,0 +1,135 @@
+/*
+ * Ported from IRIX to Linux by Kanoj Sarcar, 06/08/00.
+ * Copyright 2000 - 2001 Silicon Graphics, Inc.
+ * Copyright 2000 - 2001 Kanoj Sarcar (kanoj@sgi.com)
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/mmzone.h>
+#include <linux/kernel.h>
+#include <linux/nodemask.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/sections.h>
+#include <asm/smp.h>
+#include <asm/sn/types.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/hub.h>
+#include <asm/sn/mapped_kernel.h>
+#include <asm/sn/sn_private.h>
+
+static cpumask_t ktext_repmask;
+
+/*
+ * XXX - This needs to be much smarter about where it puts copies of the
+ * kernel. For example, we should never put a copy on a headless node,
+ * and we should respect the topology of the machine.
+ */
+void __init setup_replication_mask(void)
+{
+ cnodeid_t cnode;
+
+ /* Set only the master cnode's bit. The master cnode is always 0. */
+ cpus_clear(ktext_repmask);
+ cpu_set(0, ktext_repmask);
+
+#ifdef CONFIG_REPLICATE_KTEXT
+#ifndef CONFIG_MAPPED_KERNEL
+#error Kernel replication requires mapped kernel support. No calias support.
+#endif
+ for_each_online_node(cnode) {
+ if (cnode == 0)
+ continue;
+ /* Advertise that we have a copy of the kernel */
+ cpu_set(cnode, ktext_repmask);
+ }
+#endif
+ /* Set up a GDA pointer to the replication mask. */
+ GDA->g_ktext_repmask = &ktext_repmask;
+}
+
+
+static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid)
+{
+ cnodeid_t client_cnode;
+ kern_vars_t *kvp;
+
+ client_cnode = NASID_TO_COMPACT_NODEID(client_nasid);
+
+ kvp = &hub_data(client_nasid)->kern_vars;
+
+ KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp;
+
+ kvp->kv_magic = KV_MAGIC;
+ kvp->kv_ro_nasid = server_nasid;
+ kvp->kv_rw_nasid = master_nasid;
+ kvp->kv_ro_baseaddr = NODE_CAC_BASE(server_nasid);
+ kvp->kv_rw_baseaddr = NODE_CAC_BASE(master_nasid);
+ printk("REPLICATION: ON nasid %d, ktext from nasid %d, kdata from nasid %d\n", client_nasid, server_nasid, master_nasid);
+}
+
+/* XXX - When the BTE works, we should use it instead of this. */
+static __init void copy_kernel(nasid_t dest_nasid)
+{
+ unsigned long dest_kern_start, source_start, source_end, kern_size;
+
+ source_start = (unsigned long) _stext;
+ source_end = (unsigned long) _etext;
+ kern_size = source_end - source_start;
+
+ dest_kern_start = CHANGE_ADDR_NASID(MAPPED_KERN_RO_TO_K0(source_start),
+ dest_nasid);
+ memcpy((void *)dest_kern_start, (void *)source_start, kern_size);
+}
+
+void __init replicate_kernel_text(void)
+{
+ cnodeid_t cnode;
+ nasid_t client_nasid;
+ nasid_t server_nasid;
+
+ server_nasid = master_nasid;
+
+ /* Record where the master node should get its kernel text */
+ set_ktext_source(master_nasid, master_nasid);
+
+ for_each_online_node(cnode) {
+ if (cnode == 0)
+ continue;
+ client_nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+ /* Check if this node should get a copy of the kernel */
+ if (cpu_isset(cnode, ktext_repmask)) {
+ server_nasid = client_nasid;
+ copy_kernel(server_nasid);
+ }
+
+ /* Record where this node should get its kernel text */
+ set_ktext_source(client_nasid, server_nasid);
+ }
+}
+
+/*
+ * Return pfn of first free page of memory on a node. PROM may allocate
+ * data structures on the first couple of pages of the first slot of each
+ * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
+ */
+pfn_t node_getfirstfree(cnodeid_t cnode)
+{
+ unsigned long loadbase = REP_BASE;
+ nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
+ unsigned long offset;
+
+#ifdef CONFIG_MAPPED_KERNEL
+ loadbase += 16777216; /* 16 MB */
+#endif
+ offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
+ if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask)))
+ return (TO_NODE(nasid, offset) >> PAGE_SHIFT);
+ else
+ return (KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >>
+ PAGE_SHIFT);
+}
+
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
new file mode 100644
index 000000000000..0a44a98d7adc
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -0,0 +1,586 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2000 by Silicon Graphics, Inc.
+ * Copyright (C) 2004 by Christoph Hellwig
+ *
+ * On SGI IP27 the ARC memory configuration data is completely bogus, but
+ * alternative, easier-to-use mechanisms are available.
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/nodemask.h>
+#include <linux/swap.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+
+#include <asm/sn/arch.h>
+#include <asm/sn/hub.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/sn_private.h>
+
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+
+#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
+#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
+
+#define SLOT_IGNORED 0xffff
+
+static short __initdata slot_lastfilled_cache[MAX_COMPACT_NODES];
+static unsigned short __initdata slot_psize_cache[MAX_COMPACT_NODES][MAX_MEM_SLOTS];
+static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES];
+
+struct node_data *__node_data[MAX_COMPACT_NODES];
+
+EXPORT_SYMBOL(__node_data);
+
+static int fine_mode;
+
+static int is_fine_dirmode(void)
+{
+ return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
+ >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
+}
+
+static hubreg_t get_region(cnodeid_t cnode)
+{
+ if (fine_mode)
+ return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
+ else
+ return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
+}
+
+static hubreg_t region_mask;
+
+static void gen_region_mask(hubreg_t *region_mask)
+{
+ cnodeid_t cnode;
+
+ (*region_mask) = 0;
+ for_each_online_node(cnode) {
+ (*region_mask) |= 1ULL << get_region(cnode);
+ }
+}
+
+#define rou_rflag rou_flags
+
+static int router_distance;
+
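+/*
+ * Depth-limited depth-first walk over the router graph: rou_rflag marks
+ * routers already on the current path and router_distance is shrunk to
+ * the smallest depth at which router_b is reached from router_a.
+ */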
+static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
+{
+ klrou_t *router;
+ lboard_t *brd;
+ int port;
+
+ if (router_a->rou_rflag == 1)
+ return;
+
+ if (depth >= router_distance)
+ return;
+
+ router_a->rou_rflag = 1;
+
+ for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+ if (router_a->rou_port[port].port_nasid == INVALID_NASID)
+ continue;
+
+ brd = (lboard_t *)NODE_OFFSET_TO_K0(
+ router_a->rou_port[port].port_nasid,
+ router_a->rou_port[port].port_offset);
+
+ if (brd->brd_type == KLTYPE_ROUTER) {
+ router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
+ if (router == router_b) {
+ if (depth < router_distance)
+ router_distance = depth;
+ }
+ else
+ router_recurse(router, router_b, depth + 1);
+ }
+ }
+
+ router_a->rou_rflag = 0;
+}
+
+unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
+
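+/*
+ * Distance is 0 for a node to itself, 1 for two nodes behind the same
+ * router, otherwise the number of router hops found by router_recurse();
+ * -1 if a connecting router cannot be located in the klconfig tree.
+ */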
+static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
+{
+ klrou_t *router, *router_a = NULL, *router_b = NULL;
+ lboard_t *brd, *dest_brd;
+ cnodeid_t cnode;
+ nasid_t nasid;
+ int port;
+
+ /* Figure out which routers nodes in question are connected to */
+ for_each_online_node(cnode) {
+ nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+ if (nasid == -1) continue;
+
+ brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
+ KLTYPE_ROUTER);
+
+ if (!brd)
+ continue;
+
+ do {
+ if (brd->brd_flags & DUPLICATE_BOARD)
+ continue;
+
+ router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
+ router->rou_rflag = 0;
+
+ for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+ if (router->rou_port[port].port_nasid == INVALID_NASID)
+ continue;
+
+ dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
+ router->rou_port[port].port_nasid,
+ router->rou_port[port].port_offset);
+
+ if (dest_brd->brd_type == KLTYPE_IP27) {
+ if (dest_brd->brd_nasid == nasid_a)
+ router_a = router;
+ if (dest_brd->brd_nasid == nasid_b)
+ router_b = router;
+ }
+ }
+
+ } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
+ }
+
+ if (router_a == NULL) {
+ printk("node_distance: router_a NULL\n");
+ return -1;
+ }
+ if (router_b == NULL) {
+ printk("node_distance: router_b NULL\n");
+ return -1;
+ }
+
+ if (nasid_a == nasid_b)
+ return 0;
+
+ if (router_a == router_b)
+ return 1;
+
+ router_distance = 100;
+ router_recurse(router_a, router_b, 2);
+
+ return router_distance;
+}
+
+static void __init init_topology_matrix(void)
+{
+ nasid_t nasid, nasid2;
+ cnodeid_t row, col;
+
+ for (row = 0; row < MAX_COMPACT_NODES; row++)
+ for (col = 0; col < MAX_COMPACT_NODES; col++)
+ __node_distances[row][col] = -1;
+
+ for_each_online_node(row) {
+ nasid = COMPACT_TO_NASID_NODEID(row);
+ for_each_online_node(col) {
+ nasid2 = COMPACT_TO_NASID_NODEID(col);
+ __node_distances[row][col] =
+ compute_node_distance(nasid, nasid2);
+ }
+ }
+}
+
+static void __init dump_topology(void)
+{
+ nasid_t nasid;
+ cnodeid_t cnode;
+ lboard_t *brd, *dest_brd;
+ int port;
+ int router_num = 0;
+ klrou_t *router;
+ cnodeid_t row, col;
+
+ printk("************** Topology ********************\n");
+
+ printk(" ");
+ for_each_online_node(col)
+ printk("%02d ", col);
+ printk("\n");
+ for_each_online_node(row) {
+ printk("%02d ", row);
+ for_each_online_node(col)
+ printk("%2d ", node_distance(row, col));
+ printk("\n");
+ }
+
+ for_each_online_node(cnode) {
+ nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+ if (nasid == -1) continue;
+
+ brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
+ KLTYPE_ROUTER);
+
+ if (!brd)
+ continue;
+
+ do {
+ if (brd->brd_flags & DUPLICATE_BOARD)
+ continue;
+ printk("Router %d:", router_num);
+ router_num++;
+
+ router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
+
+ for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
+ if (router->rou_port[port].port_nasid == INVALID_NASID)
+ continue;
+
+ dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
+ router->rou_port[port].port_nasid,
+ router->rou_port[port].port_offset);
+
+ if (dest_brd->brd_type == KLTYPE_IP27)
+ printk(" %d", dest_brd->brd_nasid);
+ if (dest_brd->brd_type == KLTYPE_ROUTER)
+ printk(" r");
+ }
+ printk("\n");
+
+ } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
+ }
+}
+
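+/*
+ * The pfn directly encodes the node: the nasid sits in the bits above
+ * PFN_NASIDSHFT and the slot number in the bits above SLOT_PFNSHIFT.
+ */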
+static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
+{
+ nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+ return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+}
+
+/*
+ * Return the number of pages of memory provided by the given slot
+ * on the specified node.
+ */
+static pfn_t __init slot_getsize(cnodeid_t node, int slot)
+{
+ return (pfn_t) slot_psize_cache[node][slot];
+}
+
+/*
+ * Return highest slot filled
+ */
+static int __init node_getlastslot(cnodeid_t node)
+{
+ return (int) slot_lastfilled_cache[node];
+}
+
+/*
+ * Return the pfn of the last free page of memory on a node.
+ */
+static pfn_t __init node_getmaxclick(cnodeid_t node)
+{
+ pfn_t slot_psize;
+ int slot;
+
+ /*
+ * Start at the top slot. When we find a slot with memory in it,
+ * that's the winner.
+ */
+ for (slot = (MAX_MEM_SLOTS - 1); slot >= 0; slot--) {
+ if ((slot_psize = slot_getsize(node, slot))) {
+ if (slot_psize == SLOT_IGNORED)
+ continue;
+ /* Return the basepfn + the slot size, minus 1. */
+ return slot_getbasepfn(node, slot) + slot_psize - 1;
+ }
+ }
+
+ /*
+ * If there's no memory on the node, return 0. This is likely
+ * to cause problems.
+ */
+ return 0;
+}
+
+static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
+{
+ nasid_t nasid;
+ lboard_t *brd;
+ klmembnk_t *banks;
+ unsigned long size;
+
+ nasid = COMPACT_TO_NASID_NODEID(node);
+ /* Find the node board */
+ brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+ if (!brd)
+ return 0;
+
+ /* Get the memory bank structure */
+ banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
+ if (!banks)
+ return 0;
+
+ /* Size in _Megabytes_ */
+ size = (unsigned long)banks->membnk_bnksz[slot/4];
+
+ /*
+ * Hack for banks of 128 MB or less: the whole bank is reported in
+ * the first of its four slots.
+ */
+ if (size <= 128) {
+ if (slot % 4 == 0) {
+ size <<= 20; /* size in bytes */
+ return size >> PAGE_SHIFT;
+ } else
+ return 0;
+ } else {
+ size /= 4;
+ size <<= 20;
+ return size >> PAGE_SHIFT;
+ }
+}
+
+static void __init mlreset(void)
+{
+ int i;
+
+ master_nasid = get_nasid();
+ fine_mode = is_fine_dirmode();
+
+ /*
+ * Probe for all CPUs - this creates the cpumask and sets up the
+ * mapping tables. We need to do this as early as possible.
+ */
+#ifdef CONFIG_SMP
+ cpu_node_probe();
+#endif
+
+ init_topology_matrix();
+ dump_topology();
+
+ gen_region_mask(&region_mask);
+
+ setup_replication_mask();
+
+ /*
+ * Set all nodes' calias sizes to 8k
+ */
+ for_each_online_node(i) {
+ nasid_t nasid;
+
+ nasid = COMPACT_TO_NASID_NODEID(i);
+
+ /*
+ * Always have node 0 in the region mask, otherwise
+ * CALIAS accesses get exceptions since the hub
+ * thinks it is a node 0 address.
+ */
+ REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
+#ifdef CONFIG_REPLICATE_EXHANDLERS
+ REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
+#else
+ REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
+#endif
+
+#ifdef LATER
+ /*
+ * Set up all hubs to have a big window pointing at
+ * widget 0. Memory mode, widget 0, offset 0
+ */
+ REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
+ ((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
+ (0 << IIO_ITTE_WIDGET_SHIFT)));
+#endif
+ }
+}
+
+static void __init szmem(void)
+{
+ pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
+ int slot, ignore;
+ cnodeid_t node;
+
+ num_physpages = 0;
+
+ for_each_online_node(node) {
+ ignore = nodebytes = 0;
+ for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
+ slot_psize = slot_psize_compute(node, slot);
+ if (slot == 0)
+ slot0sz = slot_psize;
+ /*
+ * We need to refine the hack when we have replicated
+ * kernel text.
+ */
+ nodebytes += (1LL << SLOT_SHIFT);
+ if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
+ (slot0sz << PAGE_SHIFT))
+ ignore = 1;
+ if (ignore && slot_psize) {
+ printk("Ignoring slot %d onwards on node %d\n",
+ slot, node);
+ slot_psize_cache[node][slot] = SLOT_IGNORED;
+ slot = MAX_MEM_SLOTS;
+ continue;
+ }
+ num_physpages += slot_psize;
+ slot_psize_cache[node][slot] =
+ (unsigned short) slot_psize;
+ if (slot_psize)
+ slot_lastfilled_cache[node] = slot;
+ }
+ }
+}
+
+static void __init node_mem_init(cnodeid_t node)
+{
+ pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
+ pfn_t slot_lastpfn = slot_firstpfn + slot_getsize(node, 0);
+ pfn_t slot_freepfn = node_getfirstfree(node);
+ struct pglist_data *pd;
+ unsigned long bootmap_size;
+
+ /*
+ * Allocate the node data structures on the node first.
+ */
+ __node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
+
+ pd = NODE_DATA(node);
+ pd->bdata = &plat_node_bdata[node];
+
+ cpus_clear(hub_data(node)->h_cpus);
+
+ slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
+ sizeof(struct hub_data));
+
+ bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
+ slot_firstpfn, slot_lastpfn);
+ free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
+ (slot_lastpfn - slot_firstpfn) << PAGE_SHIFT);
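+ /* Re-reserve the node data structures and the bootmem bitmap itself. */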
+ reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
+ ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size);
+}
+
+/*
+ * A node with nothing. We use it to avoid any special casing in
+ * node_to_cpumask
+ */
+static struct node_data null_node = {
+ .hub = {
+ .h_cpus = CPU_MASK_NONE
+ }
+};
+
+/*
+ * Currently, the intranode memory hole support assumes that each slot
+ * contains at least 32 MBytes of memory. We assume all bootmem data
+ * fits on the first slot.
+ */
+void __init prom_meminit(void)
+{
+ cnodeid_t node;
+
+ mlreset();
+ szmem();
+
+ for (node = 0; node < MAX_COMPACT_NODES; node++) {
+ if (node_online(node)) {
+ node_mem_init(node);
+ continue;
+ }
+ __node_data[node] = &null_node;
+ }
+}
+
+unsigned long __init prom_free_prom_memory(void)
+{
+ /* We got nothing to free here ... */
+ return 0;
+}
+
+extern void pagetable_init(void);
+extern unsigned long setup_zero_pages(void);
+
+void __init paging_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned node;
+
+ pagetable_init();
+
+ for_each_online_node(node) {
+ pfn_t start_pfn = slot_getbasepfn(node, 0);
+ pfn_t end_pfn = node_getmaxclick(node) + 1;
+
+ zones_size[ZONE_DMA] = end_pfn - start_pfn;
+ free_area_init_node(node, NODE_DATA(node),
+ zones_size, start_pfn, NULL);
+
+ if (end_pfn > max_low_pfn)
+ max_low_pfn = end_pfn;
+ }
+}
+
+void __init mem_init(void)
+{
+ unsigned long codesize, datasize, initsize, tmp;
+ unsigned node;
+
+ high_memory = (void *) __va(num_physpages << PAGE_SHIFT);
+
+ for_each_online_node(node) {
+ unsigned slot, numslots;
+ struct page *end, *p;
+
+ /*
+ * This will free up the bootmem, ie, slot 0 memory.
+ */
+ totalram_pages += free_all_bootmem_node(NODE_DATA(node));
+
+ /*
+ * We need to manually do the other slots.
+ */
+ numslots = node_getlastslot(node);
+ for (slot = 1; slot <= numslots; slot++) {
+ p = NODE_DATA(node)->node_mem_map +
+ (slot_getbasepfn(node, slot) -
+ slot_getbasepfn(node, 0));
+
+ /*
+ * Free valid memory in current slot.
+ */
+ for (end = p + slot_getsize(node, slot); p < end; p++) {
+ /* if (!page_is_ram(pgnr)) continue; */
+ /* commented out until page_is_ram works */
+ ClearPageReserved(p);
+ set_page_count(p, 1);
+ __free_page(p);
+ totalram_pages++;
+ }
+ }
+ }
+
+ totalram_pages -= setup_zero_pages(); /* This comes from node 0 */
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ tmp = nr_free_pages();
+ printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
+ "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
+ tmp << (PAGE_SHIFT-10),
+ num_physpages << (PAGE_SHIFT-10),
+ codesize >> 10,
+ (num_physpages - tmp) << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+}
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
new file mode 100644
index 000000000000..b0a25e1ee8b7
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -0,0 +1,249 @@
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <asm/atomic.h>
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/nmi.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/sn0/hub.h>
+
+#if 0
+#define NODE_NUM_CPUS(n) CNODE_NUM_CPUS(n)
+#else
+#define NODE_NUM_CPUS(n) CPUS_PER_NODE
+#endif
+
+#define CNODEID_NONE (cnodeid_t)-1
+#define enter_panic_mode() spin_lock(&nmi_lock)
+
+typedef unsigned long machreg_t;
+
+DEFINE_SPINLOCK(nmi_lock);
+
+/*
+ * Let's see what else we need to do here. Set up sp, gp?
+ */
+void nmi_dump(void)
+{
+ void cont_nmi_dump(void);
+
+ cont_nmi_dump();
+}
+
+void install_cpu_nmi_handler(int slice)
+{
+ nmi_t *nmi_addr;
+
+ nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
+ if (nmi_addr->call_addr)
+ return;
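+ /*
+ * Register nmi_dump as this slice's NMI vector.  call_addr_c holds
+ * the one's complement of call_addr, presumably so the PROM can
+ * cross-check the vector before jumping to it.
+ */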
+ nmi_addr->magic = NMI_MAGIC;
+ nmi_addr->call_addr = (void *)nmi_dump;
+ nmi_addr->call_addr_c =
+ (void *)(~((unsigned long)(nmi_addr->call_addr)));
+ nmi_addr->call_parm = 0;
+}
+
+/*
+ * Copy the cpu registers which have been saved in the IP27prom format
+ * into the eframe format for the node under consideration.
+ */
+
+void nmi_cpu_eframe_save(nasid_t nasid, int slice)
+{
+ struct reg_struct *nr;
+ int i;
+
+ /* Get the pointer to the current cpu's register set. */
+ nr = (struct reg_struct *)
+ (TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
+ slice * IP27_NMI_KREGS_CPU_SIZE);
+
+ printk("NMI nasid %d: slice %d\n", nasid, slice);
+
+ /*
+ * Saved main processor registers
+ */
+ for (i = 0; i < 32; ) {
+ if ((i % 4) == 0)
+ printk("$%2d :", i);
+ printk(" %016lx", nr->gpr[i]);
+
+ i++;
+ if ((i % 4) == 0)
+ printk("\n");
+ }
+
+ printk("Hi : (value lost)\n");
+ printk("Lo : (value lost)\n");
+
+ /*
+ * Saved cp0 registers
+ */
+ printk("epc : %016lx ", nr->epc);
+ print_symbol("%s ", nr->epc);
+ printk("%s\n", print_tainted());
+ printk("ErrEPC: %016lx ", nr->error_epc);
+ print_symbol("%s\n", nr->error_epc);
+ printk("ra : %016lx ", nr->gpr[31]);
+ print_symbol("%s\n", nr->gpr[31]);
+ printk("Status: %08lx ", nr->sr);
+
+ if (nr->sr & ST0_KX)
+ printk("KX ");
+ if (nr->sr & ST0_SX)
+ printk("SX ");
+ if (nr->sr & ST0_UX)
+ printk("UX ");
+
+ switch (nr->sr & ST0_KSU) {
+ case KSU_USER:
+ printk("USER ");
+ break;
+ case KSU_SUPERVISOR:
+ printk("SUPERVISOR ");
+ break;
+ case KSU_KERNEL:
+ printk("KERNEL ");
+ break;
+ default:
+ printk("BAD_MODE ");
+ break;
+ }
+
+ if (nr->sr & ST0_ERL)
+ printk("ERL ");
+ if (nr->sr & ST0_EXL)
+ printk("EXL ");
+ if (nr->sr & ST0_IE)
+ printk("IE ");
+ printk("\n");
+
+ printk("Cause : %08lx\n", nr->cause);
+ printk("PrId : %08x\n", read_c0_prid());
+ printk("BadVA : %016lx\n", nr->badva);
+ printk("CErr : %016lx\n", nr->cache_err);
+ printk("NMI_SR: %016lx\n", nr->nmi_sr);
+
+ printk("\n");
+}
+
+void nmi_dump_hub_irq(nasid_t nasid, int slice)
+{
+ hubreg_t mask0, mask1, pend0, pend1;
+
+ if (slice == 0) { /* Slice A */
+ mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
+ mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
+ } else { /* Slice B */
+ mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
+ mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
+ }
+
+ pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
+ pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
+
+ printk("PI_INT_MASK0: %16lx PI_INT_MASK1: %16lx\n", mask0, mask1);
+ printk("PI_INT_PEND0: %16lx PI_INT_PEND1: %16lx\n", pend0, pend1);
+ printk("\n\n");
+}
+
+/*
+ * Copy the cpu registers which have been saved in the IP27prom format
+ * into the eframe format for the node under consideration.
+ */
+void nmi_node_eframe_save(cnodeid_t cnode)
+{
+ nasid_t nasid;
+ int slice;
+
+ /* Make sure that we have a valid node */
+ if (cnode == CNODEID_NONE)
+ return;
+
+ nasid = COMPACT_TO_NASID_NODEID(cnode);
+ if (nasid == INVALID_NASID)
+ return;
+
+ /* Save the registers into eframe for each cpu */
+ for (slice = 0; slice < NODE_NUM_CPUS(cnode); slice++) {
+ nmi_cpu_eframe_save(nasid, slice);
+ nmi_dump_hub_irq(nasid, slice);
+ }
+}
+
+/*
+ * Save the nmi cpu registers for all cpus in the system.
+ */
+void
+nmi_eframes_save(void)
+{
+ cnodeid_t cnode;
+
+ for_each_online_node(cnode)
+ nmi_node_eframe_save(cnode);
+}
+
+void
+cont_nmi_dump(void)
+{
+#ifndef REAL_NMI_SIGNAL
+ static atomic_t nmied_cpus = ATOMIC_INIT(0);
+
+ atomic_inc(&nmied_cpus);
+#endif
+ /*
+ * Use enter_panic_mode to allow only 1 cpu to proceed
+ */
+ enter_panic_mode();
+
+#ifdef REAL_NMI_SIGNAL
+ /*
+ * Wait up to 15 seconds for the other cpus to respond to the NMI.
+ * If a cpu has not responded after 10 sec, send it 1 additional NMI.
+ * This is for 2 reasons:
+ * - sometimes an MMSC fails to NMI all cpus.
+ * - on a 512p SN0 system, the MMSC will only send NMIs to
+ * half the cpus. Unfortunately, we don't know which cpus may be
+ * NMIed - it depends on how the site chooses to configure.
+ *
+ * Note: it has been measured that it takes the MMSC up to 2.3 secs to
+ * send NMIs to all cpus on a 256p system.
+ */
+ for (i=0; i < 1500; i++) {
+ for_each_online_node(node)
+ if (NODEPDA(node)->dump_count == 0)
+ break;
+ if (node == MAX_NUMNODES)
+ break;
+ if (i == 1000) {
+ for_each_online_node(node)
+ if (NODEPDA(node)->dump_count == 0) {
+ cpu = node_to_first_cpu(node);
+ for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
+ CPUMASK_SETB(nmied_cpus, cpu);
+ /*
+ * cputonasid, cputoslice
+ * needs kernel cpuid
+ */
+ SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
+ }
+ }
+
+ }
+ udelay(10000);
+ }
+#else
+ while (atomic_read(&nmied_cpus) != num_online_cpus());
+#endif
+
+ /*
+ * Save the nmi cpu registers for all cpus in the eframe format.
+ */
+ nmi_eframes_save();
+ LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+}
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
new file mode 100644
index 000000000000..2e16be94c78b
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -0,0 +1,81 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Reset an IP27.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/smp.h>
+#include <linux/mmzone.h>
+#include <linux/nodemask.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/reboot.h>
+#include <asm/system.h>
+#include <asm/sgialib.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/sn0/hub.h>
+
+void machine_restart(char *command) __attribute__((noreturn));
+void machine_halt(void) __attribute__((noreturn));
+void machine_power_off(void) __attribute__((noreturn));
+
+#define noreturn while(1); /* Silence gcc. */
+
+/* XXX How to pass the reboot command to the firmware??? */
+static void ip27_machine_restart(char *command)
+{
+#if 0
+ int i;
+#endif
+
+ printk("Reboot started from CPU %d\n", smp_processor_id());
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+#if 0
+ for_each_online_node(i)
+ REMOTE_HUB_S(COMPACT_TO_NASID_NODEID(i), PROMOP_REG,
+ PROMOP_REBOOT);
+#else
+ LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+#endif
+ noreturn;
+}
+
+static void ip27_machine_halt(void)
+{
+ int i;
+
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+ for_each_online_node(i)
+ REMOTE_HUB_S(COMPACT_TO_NASID_NODEID(i), PROMOP_REG,
+ PROMOP_RESTART);
+ LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
+ noreturn;
+}
+
+static void ip27_machine_power_off(void)
+{
+ /* To do ... */
+ noreturn;
+}
+
+void ip27_reboot_setup(void)
+{
+ _machine_restart = ip27_machine_restart;
+ _machine_halt = ip27_machine_halt;
+ _machine_power_off = ip27_machine_power_off;
+}
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
new file mode 100644
index 000000000000..17f768cba94f
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -0,0 +1,225 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
+ * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/nodemask.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/gda.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/launch.h>
+#include <asm/sn/mapped_kernel.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sn0/hubpi.h>
+#include <asm/sn/sn0/hubio.h>
+#include <asm/sn/sn0/ip27.h>
+
+/*
+ * Takes as first input the PROM assigned cpu id, and the kernel
+ * assigned cpu id as the second.
+ */
+static void alloc_cpupda(cpuid_t cpu, int cpunum)
+{
+ cnodeid_t node = get_cpu_cnode(cpu);
+ nasid_t nasid = COMPACT_TO_NASID_NODEID(node);
+
+ cputonasid(cpunum) = nasid;
+ cpu_data[cpunum].p_nodeid = node;
+ cputoslice(cpunum) = get_cpu_slice(cpu);
+}
+
+static nasid_t get_actual_nasid(lboard_t *brd)
+{
+ klhub_t *hub;
+
+ if (!brd)
+ return INVALID_NASID;
+
+ /* find out if we are a completely disabled brd. */
+ hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
+ if (!hub)
+ return INVALID_NASID;
+ if (!(hub->hub_info.flags & KLINFO_ENABLE)) /* disabled node brd */
+ return hub->hub_info.physid;
+ else
+ return brd->brd_nasid;
+}
+
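+/*
+ * Walk the klconfig board list for this nasid, record which compact node
+ * each cpu virtual id lives on, mark enabled cpus present and set up their
+ * per-cpu data.  Returns the highest virtual cpuid seen so far.
+ */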
+static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
+{
+ static int tot_cpus_found = 0;
+ lboard_t *brd;
+ klcpu_t *acpu;
+ int cpus_found = 0;
+ cpuid_t cpuid;
+
+ brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+
+ do {
+ acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
+ while (acpu) {
+ cpuid = acpu->cpu_info.virtid;
+ /* cnode is not valid for completely disabled brds */
+ if (get_actual_nasid(brd) == brd->brd_nasid)
+ cpuid_to_compact_node[cpuid] = cnode;
+ if (cpuid > highest)
+ highest = cpuid;
+ /* Only let it join in if it's marked enabled */
+ if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
+ (tot_cpus_found != NR_CPUS)) {
+ cpu_set(cpuid, phys_cpu_present_map);
+ alloc_cpupda(cpuid, tot_cpus_found);
+ cpus_found++;
+ tot_cpus_found++;
+ }
+ acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
+ KLSTRUCT_CPU);
+ }
+ brd = KLCF_NEXT(brd);
+ if (!brd)
+ break;
+
+ brd = find_lboard(brd, KLTYPE_IP27);
+ } while (brd);
+
+ return highest;
+}
+
+void cpu_node_probe(void)
+{
+ int i, highest = 0;
+ gda_t *gdap = GDA;
+
+ /*
+ * Initialize the arrays to invalid nodeid (-1)
+ */
+ for (i = 0; i < MAX_COMPACT_NODES; i++)
+ compact_to_nasid_node[i] = INVALID_NASID;
+ for (i = 0; i < MAX_NASIDS; i++)
+ nasid_to_compact_node[i] = INVALID_CNODEID;
+ for (i = 0; i < MAXCPUS; i++)
+ cpuid_to_compact_node[i] = INVALID_CNODEID;
+
+ /*
+ * MCD - this whole "compact node" stuff can probably be dropped,
+ * as we can handle sparse numbering now
+ */
+ nodes_clear(node_online_map);
+ for (i = 0; i < MAX_COMPACT_NODES; i++) {
+ nasid_t nasid = gdap->g_nasidtable[i];
+ if (nasid == INVALID_NASID)
+ break;
+ compact_to_nasid_node[i] = nasid;
+ nasid_to_compact_node[nasid] = i;
+ node_set_online(num_online_nodes());
+ highest = do_cpumask(i, nasid, highest);
+ }
+
+ printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
+}
+
+static void intr_clear_bits(nasid_t nasid, volatile hubreg_t *pend,
+ int base_level)
+{
+ volatile hubreg_t bits;
+ int i;
+
+ /* Check pending interrupts */
+ if ((bits = HUB_L(pend)) != 0)
+ for (i = 0; i < N_INTPEND_BITS; i++)
+ if (bits & (1 << i))
+ LOCAL_HUB_CLR_INTR(base_level + i);
+}
+
+static void intr_clear_all(nasid_t nasid)
+{
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0);
+ REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0);
+ REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0);
+ intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND0),
+ INT_PEND0_BASELVL);
+ intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND1),
+ INT_PEND1_BASELVL);
+}
+
+void __init prom_prepare_cpus(unsigned int max_cpus)
+{
+ cnodeid_t cnode;
+
+ for_each_online_node(cnode)
+ intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
+
+ replicate_kernel_text();
+
+ /*
+ * Assumption to be fixed: we're always booted on logical / physical
+ * processor 0. While we're always running on logical processor 0
+ * this still doesn't mean this is physical processor zero; it might for
+ * example be disabled in the firmware.
+ */
+ alloc_cpupda(0, 0);
+}
+
+/*
+ * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
+ * set sp to the kernel stack of the newly created idle process and gp to its
+ * thread_info so that current_thread_info() will work.
+ */
+void __init prom_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned long gp = (unsigned long) idle->thread_info;
+ unsigned long sp = gp + THREAD_SIZE - 32;
+
+ LAUNCH_SLAVE(cputonasid(cpu),cputoslice(cpu),
+ (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
+ 0, (void *) sp, (void *) gp);
+}
+
+void prom_init_secondary(void)
+{
+ per_cpu_init();
+ local_irq_enable();
+}
+
+void __init prom_cpus_done(void)
+{
+}
+
+void prom_smp_finish(void)
+{
+}
+
+void core_send_ipi(int destid, unsigned int action)
+{
+ int irq;
+
+ switch (action) {
+ case SMP_RESCHEDULE_YOURSELF:
+ irq = CPU_RESCHED_A_IRQ;
+ break;
+ case SMP_CALL_FUNCTION:
+ irq = CPU_CALL_A_IRQ;
+ break;
+ default:
+ panic("sendintr");
+ }
+
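+ /*
+ * The A and B slice interrupt numbers are consecutive, so adding the
+ * target's slice index selects the per-slice variant.
+ */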
+ irq += cputoslice(destid);
+
+ /*
+ * Convert the compact hub number to the NASID to get the correct
+ * part of the address space. Then set the interrupt bit associated
+ * with the CPU we want to send the interrupt to.
+ */
+ REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
+}
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
new file mode 100644
index 000000000000..8c1b96fffa76
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 1999, 2000, 05 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/bcd.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/mm.h>
+
+#include <asm/time.h>
+#include <asm/pgtable.h>
+#include <asm/sgialib.h>
+#include <asm/sn/ioc3.h>
+#include <asm/m48t35.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/sn0/ip27.h>
+#include <asm/sn/sn0/hub.h>
+
+/*
+ * This is a hack; we really need to figure these values out dynamically
+ *
+ * Since 800 ns works very well with various HUB frequencies, such as
+ * 360, 380, 390 and 400 MHz, we use an 800 ns rtc cycle time.
+ *
+ * Ralf: which clock rate is used to feed the counter?
+ */
+#define NSEC_PER_CYCLE 800
+#define CYCLES_PER_SEC (NSEC_PER_SEC/NSEC_PER_CYCLE)
+#define CYCLES_PER_JIFFY (CYCLES_PER_SEC/HZ)
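+/*
+ * With an 800 ns cycle that is 1,250,000 counter cycles per second;
+ * at HZ=100, for example, CYCLES_PER_JIFFY works out to 12,500.
+ */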
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+static unsigned long ct_cur[NR_CPUS]; /* What counter should be at next timer irq */
+static long last_rtc_update; /* Last time the rtc clock got updated */
+
+extern volatile unsigned long wall_jiffies;
+
+#if 0
+static int set_rtc_mmss(unsigned long nowtime)
+{
+ int retval = 0;
+ int real_seconds, real_minutes, cmos_minutes;
+ struct m48t35_rtc *rtc;
+ nasid_t nid;
+
+ nid = get_nasid();
+ rtc = (struct m48t35_rtc *)(KL_CONFIG_CH_CONS_INFO(nid)->memory_base +
+ IOC3_BYTEBUS_DEV0);
+
+ rtc->control |= M48T35_RTC_READ;
+ cmos_minutes = BCD2BIN(rtc->min);
+ rtc->control &= ~M48T35_RTC_READ;
+
+ /*
+ * Since we're only adjusting minutes and seconds, don't interfere with
+ * hour overflow. This avoids messing with unknown time zones but
+ * requires your RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if (abs(real_minutes - cmos_minutes) < 30) {
+ real_seconds = BIN2BCD(real_seconds);
+ real_minutes = BIN2BCD(real_minutes);
+ rtc->control |= M48T35_RTC_SET;
+ rtc->sec = real_seconds;
+ rtc->min = real_minutes;
+ rtc->control &= ~M48T35_RTC_SET;
+ } else {
+ printk(KERN_WARNING
+ "set_rtc_mmss: can't update from %d to %d\n",
+ cmos_minutes, real_minutes);
+ retval = -1;
+ }
+
+ return retval;
+}
+#endif
+
+void ip27_rt_timer_interrupt(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ int cpuA = cputoslice(cpu) == 0;
+ int irq = 9; /* XXX Assign number */
+
+ irq_enter();
+ write_seqlock(&xtime_lock);
+
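+ /*
+ * Ack the pending bit and move the compare register one jiffy ahead;
+ * if the free-running count has already passed the new compare value
+ * we missed a tick, so go around again.
+ */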
+again:
+ LOCAL_HUB_S(cpuA ? PI_RT_PEND_A : PI_RT_PEND_B, 0); /* Ack */
+ ct_cur[cpu] += CYCLES_PER_JIFFY;
+ LOCAL_HUB_S(cpuA ? PI_RT_COMPARE_A : PI_RT_COMPARE_B, ct_cur[cpu]);
+
+ if (LOCAL_HUB_L(PI_RT_COUNT) >= ct_cur[cpu])
+ goto again;
+
+ kstat_this_cpu.irqs[irq]++; /* kstat only for bootcpu? */
+
+ if (cpu == 0)
+ do_timer(regs);
+
+ update_process_times(user_mode(regs));
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to when a second starts.
+ */
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec > last_rtc_update + 660 &&
+ (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
+ (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+ if (rtc_set_time(xtime.tv_sec) == 0) {
+ last_rtc_update = xtime.tv_sec;
+ } else {
+ last_rtc_update = xtime.tv_sec - 600;
+ /* do it again in 60 s */
+ }
+ }
+
+ write_sequnlock(&xtime_lock);
+ irq_exit();
+}
+
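+/*
+ * Time since the last timer tick on cpu 0, in microseconds: take the
+ * free-running RT count, work out how many cycles have elapsed since
+ * the previous compare value fired, and scale by the 800 ns cycle time.
+ */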
+unsigned long ip27_do_gettimeoffset(void)
+{
+ unsigned long ct_cur1;
+ ct_cur1 = REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT) + CYCLES_PER_JIFFY;
+ return (ct_cur1 - ct_cur[0]) * NSEC_PER_CYCLE / 1000;
+}
+
+/* Includes for ioc3_init(). */
+#include <asm/sn/types.h>
+#include <asm/sn/sn0/addrs.h>
+#include <asm/sn/sn0/hubni.h>
+#include <asm/sn/sn0/hubio.h>
+#include <asm/pci/bridge.h>
+
+static __init unsigned long get_m48t35_time(void)
+{
+ unsigned int year, month, date, hour, min, sec;
+ struct m48t35_rtc *rtc;
+ nasid_t nid;
+
+ nid = get_nasid();
+ rtc = (struct m48t35_rtc *)(KL_CONFIG_CH_CONS_INFO(nid)->memory_base +
+ IOC3_BYTEBUS_DEV0);
+
+ rtc->control |= M48T35_RTC_READ;
+ sec = rtc->sec;
+ min = rtc->min;
+ hour = rtc->hour;
+ date = rtc->date;
+ month = rtc->month;
+ year = rtc->year;
+ rtc->control &= ~M48T35_RTC_READ;
+
+ sec = BCD2BIN(sec);
+ min = BCD2BIN(min);
+ hour = BCD2BIN(hour);
+ date = BCD2BIN(date);
+ month = BCD2BIN(month);
+ year = BCD2BIN(year);
+
+ year += 1970;
+
+ return mktime(year, month, date, hour, min, sec);
+}
+
+static void ip27_timer_setup(struct irqaction *irq)
+{
+	/* overwrite the handler; we use our own way */
+ irq->handler = no_action;
+
+ /* setup irqaction */
+// setup_irq(IP27_TIMER_IRQ, irq); /* XXX Can't do this yet. */
+}
+
+void __init ip27_time_init(void)
+{
+ xtime.tv_sec = get_m48t35_time();
+ xtime.tv_nsec = 0;
+
+ do_gettimeoffset = ip27_do_gettimeoffset;
+
+ board_timer_setup = ip27_timer_setup;
+}
+
+void __init cpu_time_init(void)
+{
+ lboard_t *board;
+ klcpu_t *cpu;
+ int cpuid;
+
+ /* Don't use ARCS. ARCS is fragile. Klconfig is simple and sane. */
+ board = find_lboard(KL_CONFIG_INFO(get_nasid()), KLTYPE_IP27);
+ if (!board)
+ panic("Can't find board info for myself.");
+
+ cpuid = LOCAL_HUB_L(PI_CPU_NUM) ? IP27_CPU0_INDEX : IP27_CPU1_INDEX;
+ cpu = (klcpu_t *) KLCF_COMP(board, cpuid);
+ if (!cpu)
+ panic("No information about myself?");
+
+ printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed);
+
+ set_c0_status(SRB_TIMOCLK);
+}
+
+void __init hub_rtc_init(cnodeid_t cnode)
+{
+ /*
+ * We only need to initialize the current node.
+ * If this is not the current node then it is a cpuless
+ * node and timeouts will not happen there.
+ */
+ if (get_compact_nodeid() == cnode) {
+ int cpu = smp_processor_id();
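+ /*
+ * Enable the RT interrupt for both slices, turn the profiling
+ * timer off, zero the counter, clear any pending RT interrupt
+ * and arm the first compare one jiffy ahead.
+ */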
+ LOCAL_HUB_S(PI_RT_EN_A, 1);
+ LOCAL_HUB_S(PI_RT_EN_B, 1);
+ LOCAL_HUB_S(PI_PROF_EN_A, 0);
+ LOCAL_HUB_S(PI_PROF_EN_B, 0);
+ ct_cur[cpu] = CYCLES_PER_JIFFY;
+ LOCAL_HUB_S(PI_RT_COMPARE_A, ct_cur[cpu]);
+ LOCAL_HUB_S(PI_RT_COUNT, 0);
+ LOCAL_HUB_S(PI_RT_PEND_A, 0);
+ LOCAL_HUB_S(PI_RT_COMPARE_B, ct_cur[cpu]);
+ LOCAL_HUB_S(PI_RT_COUNT, 0);
+ LOCAL_HUB_S(PI_RT_PEND_B, 0);
+ }
+}
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
new file mode 100644
index 000000000000..fc82f34a32ce
--- /dev/null
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Christoph Hellwig.
+ * Released under GPL v2.
+ *
+ * Generic XTALK initialization code
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/sn/types.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/hub.h>
+#include <asm/pci/bridge.h>
+#include <asm/xtalk/xtalk.h>
+
+
+#define XBOW_WIDGET_PART_NUM 0x0
+#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
+#define BASE_XBOW_PORT 8 /* Lowest external port */
+
+extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
+
+static int __init probe_one_port(nasid_t nasid, int widget, int masterwid)
+{
+ widgetreg_t widget_id;
+ xwidget_part_num_t partnum;
+
+ widget_id = *(volatile widgetreg_t *)
+ (RAW_NODE_SWIN_BASE(nasid, widget) + WIDGET_ID);
+ partnum = XWIDGET_PART_NUM(widget_id);
+
+ printk(KERN_INFO "Cpu %d, Nasid 0x%x, widget 0x%x (partnum 0x%x) is ",
+ smp_processor_id(), nasid, widget, partnum);
+
+ switch (partnum) {
+ case BRIDGE_WIDGET_PART_NUM:
+ case XBRIDGE_WIDGET_PART_NUM:
+ bridge_probe(nasid, widget, masterwid);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int __init xbow_probe(nasid_t nasid)
+{
+ lboard_t *brd;
+ klxbow_t *xbow_p;
+ unsigned masterwid, i;
+
+ printk("is xbow\n");
+
+ /*
+ * found xbow, so may have multiple bridges
+ * need to probe xbow
+ */
+ brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_MIDPLANE8);
+ if (!brd)
+ return -ENODEV;
+
+ xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW);
+ if (!xbow_p)
+ return -ENODEV;
+
+ /*
+ * Okay, here's an xbow. Let's arbitrate and find
+ * out if we should initialize it. Set enabled
+ * hub connected at highest or lowest widget as
+ * master.
+ */
+#ifdef WIDGET_A
+ i = HUB_WIDGET_ID_MAX + 1;
+ do {
+ i--;
+ } while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
+ (!XBOW_PORT_IS_ENABLED(xbow_p, i)));
+#else
+ i = HUB_WIDGET_ID_MIN - 1;
+ do {
+ i++;
+ } while ((!XBOW_PORT_TYPE_HUB(xbow_p, i)) ||
+ (!XBOW_PORT_IS_ENABLED(xbow_p, i)));
+#endif
+
+ masterwid = i;
+ if (nasid != XBOW_PORT_NASID(xbow_p, i))
+ return 1;
+
+ for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++) {
+ if (XBOW_PORT_IS_ENABLED(xbow_p, i) &&
+ XBOW_PORT_TYPE_IO(xbow_p, i))
+ probe_one_port(nasid, i, masterwid);
+ }
+
+ return 0;
+}
+
+void __init xtalk_probe_node(cnodeid_t nid)
+{
+ volatile u64 hubreg;
+ nasid_t nasid;
+ xwidget_part_num_t partnum;
+ widgetreg_t widget_id;
+
+ nasid = COMPACT_TO_NASID_NODEID(nid);
+ hubreg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
+
+ /* check whether the link is up */
+ if (!(hubreg & IIO_LLP_CSR_IS_UP))
+ return;
+
+ widget_id = *(volatile widgetreg_t *)
+ (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
+ partnum = XWIDGET_PART_NUM(widget_id);
+
+ printk(KERN_INFO "Cpu %d, Nasid 0x%x: partnum 0x%x is ",
+ smp_processor_id(), nasid, partnum);
+
+ switch (partnum) {
+ case BRIDGE_WIDGET_PART_NUM:
+ bridge_probe(nasid, 0x8, 0xa);
+ break;
+ case XBOW_WIDGET_PART_NUM:
+ case XXBOW_WIDGET_PART_NUM:
+ xbow_probe(nasid);
+ break;
+ default:
+ printk(" unknown widget??\n");
+ break;
+ }
+}