author     Linus Torvalds <torvalds@linux-foundation.org>   2008-05-09 08:06:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-05-09 08:06:31 -0700
commit     d9a9a23ff2b00463f25e880d13364938b321ab8a (patch)
tree       e5ddb542d535ea741227265545a885636f8967aa /arch/powerpc
parent     9b013c2820c409ff84871e55e407ec2181782773 (diff)
parent     53962ecf6ebbdb5b15a8b35fbefe34430eb25609 (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (23 commits)
[POWERPC] Remove leftover printk in isa-bridge.c
[POWERPC] Remove duplicate #include
[POWERPC] Initialize lockdep earlier
[POWERPC] Document when printk is useable
[POWERPC] Fix bogus paca->_current initialization
[POWERPC] Fix of_i2c include for module compilation
[POWERPC] Make default cputable entries reflect selected CPU family
[POWERPC] spufs: lockdep annotations for spufs_dir_close
[POWERPC] spufs: don't requeue victim context in find_victim if it's not in spu_run
[POWERPC] 4xx: Fix PCI mem in sequoia DTS
[POWERPC] 4xx: Add endpoint support to 4xx PCIe driver
[POWERPC] 4xx: Fix problem with new TLB storage attribute fields on 440x6 core
[POWERPC] spufs: spu_create should send inotify IN_CREATE event
[POWERPC] spufs: handle faults while the context switch pending flag is set
[POWERPC] spufs: fix concurrent delivery of class 0 & 1 exceptions
[POWERPC] spufs: try to route SPU interrupts to local node
[POWERPC] spufs: set SPU_CONTEXT_SWITCH_PENDING before synchronising SPU irqs
[POWERPC] spufs: don't acquire state_mutex interruptible while performing callback
[POWERPC] spufs: update master runcntl with context lock held
[POWERPC] spufs: fix post-stopped update of MFC_CNTL register
...
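
One of the commits merged above, "[POWERPC] Make default cputable entries reflect selected CPU family", replaces the old single "(generic PPC)" fallback with a per-family default entry whose PVR mask is zero (see the cputable.c hunks in the diff below). The sketch that follows is a minimal, self-contained illustration of why a zero mask acts as a catch-all; it is not the kernel's identify_cpu() implementation, and the struct, table contents, and function names are invented for this example.

#include <stdio.h>

/* Illustrative stand-in for the kernel's struct cpu_spec (fields reduced). */
struct cpu_spec_sketch {
	unsigned int pvr_mask;
	unsigned int pvr_value;
	const char  *cpu_name;
};

/*
 * A table in the style of cpu_specs[]: specific entries first, then a
 * "default match" entry whose pvr_mask of 0 matches every PVR, so the
 * walk below never falls off the end of the table.
 */
static const struct cpu_spec_sketch specs[] = {
	{ 0xffff0000, 0x80200000, "e500"               },
	{ 0xffff0000, 0x80210000, "e500v2"             },
	{ 0x00000000, 0x00000000, "(generic E500 PPC)" },
};

static const struct cpu_spec_sketch *match_cpu(unsigned int pvr)
{
	for (unsigned int i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
		if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
			return &specs[i];
	return 0; /* unreachable while the table ends with a zero-mask entry */
}

int main(void)
{
	/* An unrecognized PVR now lands on the family-specific default
	 * rather than a cross-family "(generic PPC)" entry. */
	printf("%s\n", match_cpu(0x80ff0000u)->cpu_name);
	return 0;
}
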
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/boot/dts/sequoia.dts             |   9
-rw-r--r--  arch/powerpc/kernel/btext.c                   |   1
-rw-r--r--  arch/powerpc/kernel/cputable.c                |  53
-rw-r--r--  arch/powerpc/kernel/head_44x.S                |   9
-rw-r--r--  arch/powerpc/kernel/head_64.S                 |   4
-rw-r--r--  arch/powerpc/kernel/isa-bridge.c              |   3
-rw-r--r--  arch/powerpc/kernel/setup_64.c                |  10
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c       |  53
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c        |  31
-rw-r--r--  arch/powerpc/platforms/cell/spu_priv1_mmio.c  |  16
-rw-r--r--  arch/powerpc/platforms/cell/spufs/fault.c     |  17
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c     |  10
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c       |  38
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c     |   7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h     |   3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c    |  71
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c              | 180
-rw-r--r--  arch/powerpc/xmon/xmon.c                      |   6
18 files changed, 384 insertions, 137 deletions
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts index a1ae4d6ec990..72d67564bdfc 100644 --- a/arch/powerpc/boot/dts/sequoia.dts +++ b/arch/powerpc/boot/dts/sequoia.dts @@ -342,9 +342,14 @@ /* Outbound ranges, one memory and one IO, * later cannot be changed. Chip supports a second * IO range but we don't use it for now + * From the 440EPx user manual: + * PCI 1 Memory 1 8000 0000 1 BFFF FFFF 1GB + * I/O 1 E800 0000 1 E800 FFFF 64KB + * I/O 1 E880 0000 1 EBFF FFFF 56MB */ - ranges = <02000000 0 80000000 1 80000000 0 10000000 - 01000000 0 00000000 1 e8000000 0 00100000>; + ranges = <02000000 0 80000000 1 80000000 0 40000000 + 01000000 0 00000000 1 e8000000 0 00010000 + 01000000 0 00000000 1 e8800000 0 03800000>; /* Inbound 2GB range starting at 0 */ dma-ranges = <42000000 0 0 0 0 0 80000000>; diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 9f9377745490..d8f0329b1344 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -16,7 +16,6 @@ #include <asm/mmu.h> #include <asm/pgtable.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/processor.h> #include <asm/udbg.h> diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 36080d4d1922..81738a4b3c3a 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1208,6 +1208,18 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_4xx, .platform = "ppc405", }, + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "(generic 40x PPC)", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | + PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + } #endif /* CONFIG_40x */ #ifdef CONFIG_44x @@ -1421,8 +1433,18 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_440A, .platform = "ppc440", }, + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "(generic 44x PPC)", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc440", + } #endif /* CONFIG_44x */ -#ifdef CONFIG_FSL_BOOKE #ifdef CONFIG_E200 { /* e200z5 */ .pvr_mask = 0xfff00000, @@ -1451,7 +1473,19 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_e200, .platform = "ppc5554", }, -#elif defined(CONFIG_E500) + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "(generic E200 PPC)", + .cpu_features = CPU_FTRS_E200, + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_HAS_EFP_SINGLE | + PPC_FEATURE_UNIFIED_CACHE, + .dcache_bsize = 32, + .machine_check = machine_check_e200, + .platform = "ppc5554", +#endif /* CONFIG_E200 */ +#ifdef CONFIG_E500 { /* e500 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80200000, @@ -1487,20 +1521,19 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check = machine_check_e500, .platform = "ppc8548", }, -#endif -#endif -#if !CLASSIC_PPC { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, - .cpu_name = "(generic PPC)", - .cpu_features = CPU_FTRS_GENERIC_32, - .cpu_user_features = PPC_FEATURE_32, + .cpu_name = "(generic E500 PPC)", + .cpu_features = CPU_FTRS_E500, + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP, 
.icache_bsize = 32, .dcache_bsize = 32, + .machine_check = machine_check_e500, .platform = "powerpc", - } -#endif /* !CLASSIC_PPC */ +#endif /* CONFIG_E500 */ #endif /* CONFIG_PPC32 */ }; diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index b84ec6a2fc94..c2b9dc4fce5d 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -653,7 +653,14 @@ finish_tlb_load: rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */ rlwimi r12, r10, 0, 26, 31 /* Insert static perms */ - rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */ + + /* + * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added + * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see + * include/asm-powerpc/pgtable-ppc32.h for details). + */ + rlwinm r12, r12, 0, 20, 10 + tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */ /* Done...restore registers and get out of here. diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 024805e1747d..25e84c0e1166 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -1517,10 +1517,6 @@ _INIT_STATIC(start_here_multiplatform) addi r2,r2,0x4000 add r2,r2,r26 - /* Set initial ptr to current */ - LOAD_REG_IMMEDIATE(r4, init_task) - std r4,PACACURRENT(r13) - /* Do very early kernel initializations, including initial hash table, * stab and slb setup before we turn on relocation. */ diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 289af348978d..4d5731b2429a 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -108,9 +108,6 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, if (size > 0x10000) size = 0x10000; - printk(KERN_ERR "no ISA IO ranges or unexpected isa range, " - "mapping 64k\n"); - __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, size, _PAGE_NO_CACHE|_PAGE_GUARDED); return; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 25e3fd8606ab..098fd96a394a 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -170,6 +170,8 @@ void __init setup_paca(int cpu) void __init early_setup(unsigned long dt_ptr) { + /* -------- printk is _NOT_ safe to use here ! ------- */ + /* Fill in any unititialised pacas */ initialise_pacas(); @@ -179,12 +181,14 @@ void __init early_setup(unsigned long dt_ptr) /* Assume we're on cpu 0 for now. Don't write to the paca yet! 
*/ setup_paca(0); - /* Enable early debugging if any specified (see udbg.h) */ - udbg_early_init(); - /* Initialize lockdep early or else spinlocks will blow */ lockdep_init(); + /* -------- printk is now safe to use ------- */ + + /* Enable early debugging if any specified (see udbg.h) */ + udbg_early_init(); + DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); /* diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 04f74f9f9ab6..5bf7df146022 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -35,6 +35,7 @@ #include <linux/percpu.h> #include <linux/types.h> #include <linux/ioport.h> +#include <linux/kernel_stat.h> #include <asm/io.h> #include <asm/pgtable.h> @@ -231,6 +232,54 @@ static int iic_host_match(struct irq_host *h, struct device_node *node) "IBM,CBEA-Internal-Interrupt-Controller"); } +extern int noirqdebug; + +static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) +{ + const unsigned int cpu = smp_processor_id(); + + spin_lock(&desc->lock); + + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + + /* + * If we're currently running this IRQ, or its disabled, + * we shouldn't process the IRQ. Mark it pending, handle + * the necessary masking and go out + */ + if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || + !desc->action)) { + desc->status |= IRQ_PENDING; + goto out_eoi; + } + + kstat_cpu(cpu).irqs[irq]++; + + /* Mark the IRQ currently in progress.*/ + desc->status |= IRQ_INPROGRESS; + + do { + struct irqaction *action = desc->action; + irqreturn_t action_ret; + + if (unlikely(!action)) + goto out_eoi; + + desc->status &= ~IRQ_PENDING; + spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + spin_lock(&desc->lock); + + } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); + + desc->status &= ~IRQ_INPROGRESS; +out_eoi: + desc->chip->eoi(irq); + spin_unlock(&desc->lock); +} + static int iic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { @@ -240,10 +289,10 @@ static int iic_host_map(struct irq_host *h, unsigned int virq, break; case IIC_IRQ_TYPE_IOEXC: set_irq_chip_and_handler(virq, &iic_ioexc_chip, - handle_fasteoi_irq); + handle_iic_irq); break; default: - set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq); + set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq); } return 0; } diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 6bab44b7716b..70c660121ec4 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -141,6 +141,10 @@ static void spu_restart_dma(struct spu *spu) if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags)) out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); + else { + set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); + mb(); + } } static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb) @@ -226,11 +230,13 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) return 0; } - spu->class_0_pending = 0; - spu->dar = ea; - spu->dsisr = dsisr; + spu->class_1_dar = ea; + spu->class_1_dsisr = dsisr; + + spu->stop_callback(spu, 1); - spu->stop_callback(spu); + spu->class_1_dar = 0; + spu->class_1_dsisr = 0; return 0; } @@ -318,11 +324,15 @@ spu_irq_class_0(int irq, void *data) stat = spu_int_stat_get(spu, 0) & mask; spu->class_0_pending |= stat; - spu->dsisr = 
spu_mfc_dsisr_get(spu); - spu->dar = spu_mfc_dar_get(spu); + spu->class_0_dsisr = spu_mfc_dsisr_get(spu); + spu->class_0_dar = spu_mfc_dar_get(spu); spin_unlock(&spu->register_lock); - spu->stop_callback(spu); + spu->stop_callback(spu, 0); + + spu->class_0_pending = 0; + spu->class_0_dsisr = 0; + spu->class_0_dar = 0; spu_int_stat_clear(spu, 0, stat); @@ -363,6 +373,9 @@ spu_irq_class_1(int irq, void *data) if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR) ; + spu->class_1_dsisr = 0; + spu->class_1_dar = 0; + return stat ? IRQ_HANDLED : IRQ_NONE; } @@ -396,10 +409,10 @@ spu_irq_class_2(int irq, void *data) spu->ibox_callback(spu); if (stat & CLASS2_SPU_STOP_INTR) - spu->stop_callback(spu); + spu->stop_callback(spu, 2); if (stat & CLASS2_SPU_HALT_INTR) - spu->stop_callback(spu); + spu->stop_callback(spu, 2); if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR) spu->mfc_callback(spu); diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c index 67fa7247b80a..906a0a2a9fe1 100644 --- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c +++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c @@ -28,6 +28,7 @@ #include <linux/io.h> #include <linux/mutex.h> #include <linux/device.h> +#include <linux/sched.h> #include <asm/spu.h> #include <asm/spu_priv1.h> @@ -75,8 +76,19 @@ static u64 int_stat_get(struct spu *spu, int class) static void cpu_affinity_set(struct spu *spu, int cpu) { - u64 target = iic_get_target_id(cpu); - u64 route = target << 48 | target << 32 | target << 16; + u64 target; + u64 route; + + if (nr_cpus_node(spu->node)) { + cpumask_t spumask = node_to_cpumask(spu->node); + cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu)); + + if (!cpus_intersects(spumask, cpumask)) + return; + } + + target = iic_get_target_id(cpu); + route = target << 48 | target << 32 | target << 16; out_be64(&spu->priv1->int_route_RW, route); } diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c index e46d300e21a5..f093a581ac74 100644 --- a/arch/powerpc/platforms/cell/spufs/fault.c +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -83,13 +83,18 @@ int spufs_handle_class0(struct spu_context *ctx) return 0; if (stat & CLASS0_DMA_ALIGNMENT_INTR) - spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT); + spufs_handle_event(ctx, ctx->csa.class_0_dar, + SPE_EVENT_DMA_ALIGNMENT); if (stat & CLASS0_INVALID_DMA_COMMAND_INTR) - spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA); + spufs_handle_event(ctx, ctx->csa.class_0_dar, + SPE_EVENT_INVALID_DMA); if (stat & CLASS0_SPU_ERROR_INTR) - spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR); + spufs_handle_event(ctx, ctx->csa.class_0_dar, + SPE_EVENT_SPE_ERROR); + + ctx->csa.class_0_pending = 0; return -EIO; } @@ -119,8 +124,8 @@ int spufs_handle_class1(struct spu_context *ctx) * in time, we can still expect to get the same fault * the immediately after the context restore. */ - ea = ctx->csa.dar; - dsisr = ctx->csa.dsisr; + ea = ctx->csa.class_1_dar; + dsisr = ctx->csa.class_1_dsisr; if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))) return 0; @@ -158,7 +163,7 @@ int spufs_handle_class1(struct spu_context *ctx) * time slicing will not preempt the context while the page fault * handler is running. Context switch code removes mappings. 
*/ - ctx->csa.dar = ctx->csa.dsisr = 0; + ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0; /* * If we handled the fault successfully and are in runnable diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 0c32a05ab068..f407b2471855 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -23,6 +23,7 @@ #include <linux/file.h> #include <linux/fs.h> +#include <linux/fsnotify.h> #include <linux/backing-dev.h> #include <linux/init.h> #include <linux/ioctl.h> @@ -223,7 +224,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file) parent = dir->d_parent->d_inode; ctx = SPUFS_I(dir->d_inode)->i_ctx; - mutex_lock(&parent->i_mutex); + mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT); ret = spufs_rmdir(parent, dir); mutex_unlock(&parent->i_mutex); WARN_ON(ret); @@ -618,12 +619,15 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode, mode &= ~current->fs->umask; if (flags & SPU_CREATE_GANG) - return spufs_create_gang(nd->path.dentry->d_inode, + ret = spufs_create_gang(nd->path.dentry->d_inode, dentry, nd->path.mnt, mode); else - return spufs_create_context(nd->path.dentry->d_inode, + ret = spufs_create_context(nd->path.dentry->d_inode, dentry, nd->path.mnt, flags, mode, filp); + if (ret >= 0) + fsnotify_mkdir(nd->path.dentry->d_inode, dentry); + return ret; out_dput: dput(dentry); diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index a9c35b7b719f..b7493b865812 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -11,7 +11,7 @@ #include "spufs.h" /* interrupt-level stop callback function. */ -void spufs_stop_callback(struct spu *spu) +void spufs_stop_callback(struct spu *spu, int irq) { struct spu_context *ctx = spu->ctx; @@ -24,9 +24,19 @@ void spufs_stop_callback(struct spu *spu) */ if (ctx) { /* Copy exception arguments into module specific structure */ - ctx->csa.class_0_pending = spu->class_0_pending; - ctx->csa.dsisr = spu->dsisr; - ctx->csa.dar = spu->dar; + switch(irq) { + case 0 : + ctx->csa.class_0_pending = spu->class_0_pending; + ctx->csa.class_0_dsisr = spu->class_0_dsisr; + ctx->csa.class_0_dar = spu->class_0_dar; + break; + case 1 : + ctx->csa.class_1_dsisr = spu->class_1_dsisr; + ctx->csa.class_1_dar = spu->class_1_dar; + break; + case 2 : + break; + } /* ensure that the exception status has hit memory before a * thread waiting on the context's stop queue is woken */ @@ -34,11 +44,6 @@ void spufs_stop_callback(struct spu *spu) wake_up_all(&ctx->stop_wq); } - - /* Clear callback arguments from spu structure */ - spu->class_0_pending = 0; - spu->dsisr = 0; - spu->dar = 0; } int spu_stopped(struct spu_context *ctx, u32 *stat) @@ -56,7 +61,11 @@ int spu_stopped(struct spu_context *ctx, u32 *stat) if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped)) return 1; - dsisr = ctx->csa.dsisr; + dsisr = ctx->csa.class_0_dsisr; + if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) + return 1; + + dsisr = ctx->csa.class_1_dsisr; if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) return 1; @@ -294,7 +303,7 @@ static int spu_process_callback(struct spu_context *ctx) u32 ls_pointer, npc; void __iomem *ls; long spu_ret; - int ret, ret2; + int ret; /* get syscall block from local store */ npc = ctx->ops->npc_read(ctx) & ~3; @@ -316,11 +325,9 @@ static int spu_process_callback(struct spu_context *ctx) if (spu_ret <= -ERESTARTSYS) { ret = 
spu_handle_restartsys(ctx, &spu_ret, &npc); } - ret2 = spu_acquire(ctx); + mutex_lock(&ctx->state_mutex); if (ret == -ERESTARTSYS) return ret; - if (ret2) - return -EINTR; } /* need to re-get the ls, as it may have changed when we released the @@ -343,13 +350,14 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) if (mutex_lock_interruptible(&ctx->run_mutex)) return -ERESTARTSYS; - spu_enable_spu(ctx); ctx->event_return = 0; ret = spu_acquire(ctx); if (ret) goto out_unlock; + spu_enable_spu(ctx); + spu_update_sched_info(ctx); ret = spu_run_init(ctx, npc); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 7298e7db2c83..2e411f23462b 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -140,6 +140,9 @@ void __spu_update_sched_info(struct spu_context *ctx) * if it is timesliced or preempted. */ ctx->cpus_allowed = current->cpus_allowed; + + /* Save the current cpu id for spu interrupt routing. */ + ctx->last_ran = raw_smp_processor_id(); } void spu_update_sched_info(struct spu_context *ctx) @@ -243,7 +246,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx) spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); spu_restore(&ctx->csa, spu); spu->timestamp = jiffies; - spu_cpu_affinity_set(spu, raw_smp_processor_id()); spu_switch_notify(spu, ctx); ctx->state = SPU_STATE_RUNNABLE; @@ -657,7 +659,8 @@ static struct spu *find_victim(struct spu_context *ctx) victim->stats.invol_ctx_switch++; spu->stats.invol_ctx_switch++; - spu_add_to_rq(victim); + if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) + spu_add_to_rq(victim); mutex_unlock(&victim->state_mutex); diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 7312745b7540..454c277c1457 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -121,6 +121,7 @@ struct spu_context { cpumask_t cpus_allowed; int policy; int prio; + int last_ran; /* statistics */ struct { @@ -331,7 +332,7 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data); /* irq callback funcs. */ void spufs_ibox_callback(struct spu *spu); void spufs_wbox_callback(struct spu *spu); -void spufs_stop_callback(struct spu *spu); +void spufs_stop_callback(struct spu *spu, int irq); void spufs_mfc_callback(struct spu *spu); void spufs_dma_callback(struct spu *spu, int type); diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c index d2a1249d36dd..3df9a36eb2f5 100644 --- a/arch/powerpc/platforms/cell/spufs/switch.c +++ b/arch/powerpc/platforms/cell/spufs/switch.c @@ -132,6 +132,14 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) spu_int_mask_set(spu, 2, 0ul); eieio(); spin_unlock_irq(&spu->register_lock); + + /* + * This flag needs to be set before calling synchronize_irq so + * that the update will be visible to the relevant handlers + * via a simple load. + */ + set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); + clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); synchronize_irq(spu->irqs[0]); synchronize_irq(spu->irqs[1]); synchronize_irq(spu->irqs[2]); @@ -166,9 +174,8 @@ static inline void set_switch_pending(struct spu_state *csa, struct spu *spu) /* Save, Step 7: * Restore, Step 5: * Set a software context switch pending flag. + * Done above in Step 3 - disable_interrupts(). 
*/ - set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); - mb(); } static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) @@ -186,20 +193,21 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) MFC_CNTL_SUSPEND_COMPLETE); /* fall through */ case MFC_CNTL_SUSPEND_COMPLETE: - if (csa) { + if (csa) csa->priv2.mfc_control_RW = - MFC_CNTL_SUSPEND_MASK | + in_be64(&priv2->mfc_control_RW) | MFC_CNTL_SUSPEND_DMA_QUEUE; - } break; case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION: out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == MFC_CNTL_SUSPEND_COMPLETE); - if (csa) { - csa->priv2.mfc_control_RW = 0; - } + if (csa) + csa->priv2.mfc_control_RW = + in_be64(&priv2->mfc_control_RW) & + ~MFC_CNTL_SUSPEND_DMA_QUEUE & + ~MFC_CNTL_SUSPEND_MASK; break; } } @@ -249,16 +257,21 @@ static inline void save_spu_status(struct spu_state *csa, struct spu *spu) } } -static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu) +static inline void save_mfc_stopped_status(struct spu_state *csa, + struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; + const u64 mask = MFC_CNTL_DECREMENTER_RUNNING | + MFC_CNTL_DMA_QUEUES_EMPTY; /* Save, Step 12: * Read MFC_CNTL[Ds]. Update saved copy of * CSA.MFC_CNTL[Ds]. + * + * update: do the same with MFC_CNTL[Q]. */ - csa->priv2.mfc_control_RW |= - in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING; + csa->priv2.mfc_control_RW &= ~mask; + csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask; } static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) @@ -462,7 +475,9 @@ static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu) * Restore, Step 14. * Write MFC_CNTL[Pc]=1 (purge queue). */ - out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST); + out_be64(&priv2->mfc_control_RW, + MFC_CNTL_PURGE_DMA_REQUEST | + MFC_CNTL_SUSPEND_MASK); eieio(); } @@ -725,10 +740,14 @@ static inline void set_switch_active(struct spu_state *csa, struct spu *spu) /* Save, Step 48: * Restore, Step 23. * Change the software context switch pending flag - * to context switch active. + * to context switch active. This implementation does + * not uses a switch active flag. * - * This implementation does not uses a switch active flag. + * Now that we have saved the mfc in the csa, we can add in the + * restart command if an exception occurred. */ + if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags)) + csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); mb(); } @@ -1690,6 +1709,13 @@ static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) eieio(); } +static inline void set_int_route(struct spu_state *csa, struct spu *spu) +{ + struct spu_context *ctx = spu->ctx; + + spu_cpu_affinity_set(spu, ctx->last_ran); +} + static inline void restore_other_spu_access(struct spu_state *csa, struct spu *spu) { @@ -1721,15 +1747,15 @@ static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu) */ out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); eieio(); + /* - * FIXME: this is to restart a DMA that we were processing - * before the save. better remember the fault information - * in the csa instead. + * The queue is put back into the same state that was evident prior to + * the context switch. 
The suspend flag is added to the saved state in + * the csa, if the operational state was suspending or suspended. In + * this case, the code that suspended the mfc is responsible for + * continuing it. Note that SPE faults do not change the operational + * state of the spu. */ - if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) { - out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); - eieio(); - } } static inline void enable_user_access(struct spu_state *csa, struct spu *spu) @@ -1788,7 +1814,7 @@ static int quiece_spu(struct spu_state *prev, struct spu *spu) save_spu_runcntl(prev, spu); /* Step 9. */ save_mfc_sr1(prev, spu); /* Step 10. */ save_spu_status(prev, spu); /* Step 11. */ - save_mfc_decr(prev, spu); /* Step 12. */ + save_mfc_stopped_status(prev, spu); /* Step 12. */ halt_mfc_decr(prev, spu); /* Step 13. */ save_timebase(prev, spu); /* Step 14. */ remove_other_spu_access(prev, spu); /* Step 15. */ @@ -2000,6 +2026,7 @@ static void restore_csa(struct spu_state *next, struct spu *spu) check_ppuint_mb_stat(next, spu); /* Step 67. */ spu_invalidate_slbs(spu); /* Modified Step 68. */ restore_mfc_sr1(next, spu); /* Step 69. */ + set_int_route(next, spu); /* NEW */ restore_other_spu_access(next, spu); /* Step 70. */ restore_spu_runcntl(next, spu); /* Step 71. */ restore_mfc_cntl(next, spu); /* Step 72. */ diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c index 1814adbd2236..b4a54c52e880 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/arch/powerpc/sysdev/ppc4xx_pci.c @@ -1387,28 +1387,59 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port, resource_size_t size = res->end - res->start + 1; u64 sa; - /* Calculate window size */ - sa = (0xffffffffffffffffull << ilog2(size));; - if (res->flags & IORESOURCE_PREFETCH) - sa |= 0x8; + if (port->endpoint) { + resource_size_t ep_addr = 0; + resource_size_t ep_size = 32 << 20; + + /* Currently we map a fixed 64MByte window to PLB address + * 0 (SDRAM). This should probably be configurable via a dts + * property. + */ + + /* Calculate window size */ + sa = (0xffffffffffffffffull << ilog2(ep_size));; + + /* Setup BAR0 */ + out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); + out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) | + PCI_BASE_ADDRESS_MEM_TYPE_64); - out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); - out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa)); + /* Disable BAR1 & BAR2 */ + out_le32(mbase + PECFG_BAR1MPA, 0); + out_le32(mbase + PECFG_BAR2HMPA, 0); + out_le32(mbase + PECFG_BAR2LMPA, 0); - /* The setup of the split looks weird to me ... let's see if it works */ - out_le32(mbase + PECFG_PIM0LAL, 0x00000000); - out_le32(mbase + PECFG_PIM0LAH, 0x00000000); - out_le32(mbase + PECFG_PIM1LAL, 0x00000000); - out_le32(mbase + PECFG_PIM1LAH, 0x00000000); - out_le32(mbase + PECFG_PIM01SAH, 0xffff0000); - out_le32(mbase + PECFG_PIM01SAL, 0x00000000); + out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa)); + out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa)); + + out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr)); + out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr)); + } else { + /* Calculate window size */ + sa = (0xffffffffffffffffull << ilog2(size));; + if (res->flags & IORESOURCE_PREFETCH) + sa |= 0x8; + + out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); + out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa)); + + /* The setup of the split looks weird to me ... 
let's see + * if it works + */ + out_le32(mbase + PECFG_PIM0LAL, 0x00000000); + out_le32(mbase + PECFG_PIM0LAH, 0x00000000); + out_le32(mbase + PECFG_PIM1LAL, 0x00000000); + out_le32(mbase + PECFG_PIM1LAH, 0x00000000); + out_le32(mbase + PECFG_PIM01SAH, 0xffff0000); + out_le32(mbase + PECFG_PIM01SAL, 0x00000000); + + out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start)); + out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start)); + } /* Enable inbound mapping */ out_le32(mbase + PECFG_PIMEN, 0x1); - out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start)); - out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start)); - /* Enable I/O, Mem, and Busmaster cycles */ out_le16(mbase + PCI_COMMAND, in_le16(mbase + PCI_COMMAND) | @@ -1422,13 +1453,8 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) const int *bus_range; int primary = 0, busses; void __iomem *mbase = NULL, *cfg_data = NULL; - - /* XXX FIXME: Handle endpoint mode properly */ - if (port->endpoint) { - printk(KERN_WARNING "PCIE%d: Port in endpoint mode !\n", - port->index); - return; - } + const u32 *pval; + u32 val; /* Check if primary bridge */ if (of_get_property(port->node, "primary", NULL)) @@ -1462,21 +1488,30 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) hose->last_busno = hose->first_busno + busses; } - /* We map the external config space in cfg_data and the host config - * space in cfg_addr. External space is 1M per bus, internal space - * is 4K + if (!port->endpoint) { + /* Only map the external config space in cfg_data for + * PCIe root-complexes. External space is 1M per bus + */ + cfg_data = ioremap(port->cfg_space.start + + (hose->first_busno + 1) * 0x100000, + busses * 0x100000); + if (cfg_data == NULL) { + printk(KERN_ERR "%s: Can't map external config space !", + port->node->full_name); + goto fail; + } + hose->cfg_data = cfg_data; + } + + /* Always map the host config space in cfg_addr. + * Internal space is 4K */ - cfg_data = ioremap(port->cfg_space.start + - (hose->first_busno + 1) * 0x100000, - busses * 0x100000); mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000); - if (cfg_data == NULL || mbase == NULL) { - printk(KERN_ERR "%s: Can't map config space !", + if (mbase == NULL) { + printk(KERN_ERR "%s: Can't map internal config space !", port->node->full_name); goto fail; } - - hose->cfg_data = cfg_data; hose->cfg_addr = mbase; pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name, @@ -1489,12 +1524,14 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) port->hose = hose; mbase = (void __iomem *)hose->cfg_addr; - /* - * Set bus numbers on our root port - */ - out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno); - out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1); - out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno); + if (!port->endpoint) { + /* + * Set bus numbers on our root port + */ + out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno); + out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1); + out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno); + } /* * OMRs are already reset, also disable PIMs @@ -1515,17 +1552,49 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window); /* The root complex doesn't show up if we don't set some vendor - * and device IDs into it. Those are the same bogus one that the - * initial code in arch/ppc add. We might want to change that. 
+ * and device IDs into it. The defaults below are the same bogus + * one that the initial code in arch/ppc had. This can be + * overwritten by setting the "vendor-id/device-id" properties + * in the pciex node. */ - out_le16(mbase + 0x200, 0xaaa0 + port->index); - out_le16(mbase + 0x202, 0xbed0 + port->index); - /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */ - out_le32(mbase + 0x208, 0x06040001); + /* Get the (optional) vendor-/device-id from the device-tree */ + pval = of_get_property(port->node, "vendor-id", NULL); + if (pval) { + val = *pval; + } else { + if (!port->endpoint) + val = 0xaaa0 + port->index; + else + val = 0xeee0 + port->index; + } + out_le16(mbase + 0x200, val); + + pval = of_get_property(port->node, "device-id", NULL); + if (pval) { + val = *pval; + } else { + if (!port->endpoint) + val = 0xbed0 + port->index; + else + val = 0xfed0 + port->index; + } + out_le16(mbase + 0x202, val); + + if (!port->endpoint) { + /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */ + out_le32(mbase + 0x208, 0x06040001); + + printk(KERN_INFO "PCIE%d: successfully set as root-complex\n", + port->index); + } else { + /* Set Class Code to Processor/PPC */ + out_le32(mbase + 0x208, 0x0b200001); + + printk(KERN_INFO "PCIE%d: successfully set as endpoint\n", + port->index); + } - printk(KERN_INFO "PCIE%d: successfully set as root-complex\n", - port->index); return; fail: if (hose) @@ -1542,6 +1611,7 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) const u32 *pval; int portno; unsigned int dcrs; + const char *val; /* First, proceed to core initialization as we assume there's * only one PCIe core in the system @@ -1573,8 +1643,20 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) } port->sdr_base = *pval; - /* XXX Currently, we only support root complex mode */ - port->endpoint = 0; + /* Check if device_type property is set to "pci" or "pci-endpoint". + * Resulting from this setup this PCIe port will be configured + * as root-complex or as endpoint. + */ + val = of_get_property(port->node, "device_type", NULL); + if (!strcmp(val, "pci-endpoint")) { + port->endpoint = 1; + } else if (!strcmp(val, "pci")) { + port->endpoint = 0; + } else { + printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n", + np->full_name); + return; + } /* Fetch config space registers address */ if (of_address_to_resource(np, 0, &port->cfg_space)) { diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 52c74780f403..1702de9395ee 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2842,9 +2842,11 @@ static void dump_spu_fields(struct spu *spu) DUMP_FIELD(spu, "0x%lx", ls_size); DUMP_FIELD(spu, "0x%x", node); DUMP_FIELD(spu, "0x%lx", flags); - DUMP_FIELD(spu, "0x%lx", dar); - DUMP_FIELD(spu, "0x%lx", dsisr); DUMP_FIELD(spu, "%d", class_0_pending); + DUMP_FIELD(spu, "0x%lx", class_0_dar); + DUMP_FIELD(spu, "0x%lx", class_0_dsisr); + DUMP_FIELD(spu, "0x%lx", class_1_dar); + DUMP_FIELD(spu, "0x%lx", class_1_dsisr); DUMP_FIELD(spu, "0x%lx", irqs[0]); DUMP_FIELD(spu, "0x%lx", irqs[1]); DUMP_FIELD(spu, "0x%lx", irqs[2]); |
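
For reference, the sequoia.dts ranges fix near the top of the diff can be sanity-checked against the 440EPx windows quoted in its comment: the new size cells (0x40000000, 0x00010000, 0x03800000) correspond to the 1GB memory window and the 64KB and 56MB I/O windows. The snippet below is a standalone arithmetic check only; the macro names are invented for illustration and none of it is kernel code.

#include <assert.h>
#include <stdio.h>

/* Window bounds taken from the comment added to sequoia.dts; the
 * identifiers below exist only for this check. */
#define PCI_MEM_START 0x80000000ull
#define PCI_MEM_END   0xBFFFFFFFull
#define IO0_START     0xE8000000ull
#define IO0_END       0xE800FFFFull
#define IO1_START     0xE8800000ull
#define IO1_END       0xEBFFFFFFull

int main(void)
{
	/* Sizes encoded in the new ranges property. */
	unsigned long long mem = PCI_MEM_END - PCI_MEM_START + 1; /* 0x40000000 */
	unsigned long long io0 = IO0_END - IO0_START + 1;         /* 0x00010000 */
	unsigned long long io1 = IO1_END - IO1_START + 1;         /* 0x03800000 */

	assert(mem == 0x40000000ull && mem == 1ull << 30);  /* 1GB  */
	assert(io0 == 0x00010000ull && io0 == 64ull << 10); /* 64KB */
	assert(io1 == 0x03800000ull && io1 == 56ull << 20); /* 56MB */

	printf("mem=0x%llx io0=0x%llx io1=0x%llx\n", mem, io0, io1);
	return 0;
}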