author     Grant Likely <grant.likely@secretlab.ca>    2012-02-28 13:48:58 -0600
committer  Grant Likely <grant.likely@secretlab.ca>    2012-02-28 13:48:58 -0600
commit     b3950d50cfc343b3e7dc5c69c96a61b182fd1e37 (patch)
tree       d54affae2b1e25464493b48aa88cd8d6b4770812 /arch
parent     daefd89efc279b142bbb054577c2d706da211723 (diff)
parent     280ad7fda5f95211857fda38960f2b6fdf6edd3e (diff)
Merge branch 'irqdomain/next' into gpio/next
Diffstat (limited to 'arch')
138 files changed, 875 insertions(+), 3627 deletions(-)
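The largest part of this merge is the conversion of ARM interrupt-controller drivers (GIC, VIC, i.MX TZIC/GPIO, OMAP INTC, MSM) from the old embedded `struct irq_domain` API (`irq_domain_add()`, `irq_domain_to_irq()`, `dt_translate`) to the pointer-based legacy-domain API (`irq_domain_add_legacy()`, `irq_find_mapping()`, `.map`/`.xlate` ops). The sketch below illustrates that conversion pattern under stated assumptions: the `my_intc_*` names, the 32-hwirq size, and the use of `dummy_irq_chip`/`handle_level_irq` are illustrative placeholders, not taken from any driver in this diff.

```c
/*
 * Illustrative sketch of the irqdomain conversion pattern applied in this
 * merge. Names prefixed my_ are hypothetical; real drivers (e.g. the GIC)
 * also provide an .xlate callback for device-tree interrupt specifiers.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

struct my_intc_data {
	void __iomem *base;
	struct irq_domain *domain;	/* was: struct irq_domain domain; */
};

/* Per-IRQ setup that previously lived in an irq_domain_for_each_irq() loop */
static int my_intc_map(struct irq_domain *d, unsigned int irq,
		       irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, d->host_data);
	return 0;
}

static const struct irq_domain_ops my_intc_domain_ops = {
	.map	= my_intc_map,
};

static void my_intc_init(struct my_intc_data *data, struct device_node *node,
			 int irq_base)
{
	/*
	 * Legacy domain: a fixed block of 32 hwirqs pre-mapped onto Linux
	 * IRQ numbers starting at irq_base, with driver data as host_data.
	 */
	data->domain = irq_domain_add_legacy(node, 32, irq_base, 0,
					     &my_intc_domain_ops, data);
}

static void my_intc_handle_hwirq(struct my_intc_data *data,
				 unsigned long hwirq)
{
	/* was: irq_domain_to_irq(&data->domain, hwirq) */
	generic_handle_irq(irq_find_mapping(data->domain, hwirq));
}
```

The practical effect, visible throughout the diff below, is that drivers no longer hand-compute Linux IRQ numbers from a hwirq base; the domain owns the mapping, and flow handlers look it up with `irq_find_mapping()`.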
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index c47d6199b784..f0783be17352 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -51,7 +51,6 @@ union gic_base { }; struct gic_chip_data { - unsigned int irq_offset; union gic_base dist_base; union gic_base cpu_base; #ifdef CONFIG_CPU_PM @@ -61,9 +60,7 @@ struct gic_chip_data { u32 __percpu *saved_ppi_enable; u32 __percpu *saved_ppi_conf; #endif -#ifdef CONFIG_IRQ_DOMAIN - struct irq_domain domain; -#endif + struct irq_domain *domain; unsigned int gic_irqs; #ifdef CONFIG_GIC_NON_BANKED void __iomem *(*get_base)(union gic_base *); @@ -282,7 +279,7 @@ asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) irqnr = irqstat & ~0x1c00; if (likely(irqnr > 15 && irqnr < 1021)) { - irqnr = irq_domain_to_irq(&gic->domain, irqnr); + irqnr = irq_find_mapping(gic->domain, irqnr); handle_IRQ(irqnr, regs); continue; } @@ -314,8 +311,8 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) if (gic_irq == 1023) goto out; - cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq); - if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS)) + cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); + if (unlikely(gic_irq < 32 || gic_irq > 1020)) do_bad_IRQ(cascade_irq, desc); else generic_handle_irq(cascade_irq); @@ -348,10 +345,9 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) static void __init gic_dist_init(struct gic_chip_data *gic) { - unsigned int i, irq; + unsigned int i; u32 cpumask; unsigned int gic_irqs = gic->gic_irqs; - struct irq_domain *domain = &gic->domain; void __iomem *base = gic_data_dist_base(gic); u32 cpu = cpu_logical_map(smp_processor_id()); @@ -386,23 +382,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic) for (i = 32; i < gic_irqs; i += 32) writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); - /* - * Setup the Linux IRQ subsystem. 
- */ - irq_domain_for_each_irq(domain, i, irq) { - if (i < 32) { - irq_set_percpu_devid(irq); - irq_set_chip_and_handler(irq, &gic_chip, - handle_percpu_devid_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); - } else { - irq_set_chip_and_handler(irq, &gic_chip, - handle_fasteoi_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - } - irq_set_chip_data(irq, gic); - } - writel_relaxed(1, base + GIC_DIST_CTRL); } @@ -618,11 +597,27 @@ static void __init gic_pm_init(struct gic_chip_data *gic) } #endif -#ifdef CONFIG_OF -static int gic_irq_domain_dt_translate(struct irq_domain *d, - struct device_node *controller, - const u32 *intspec, unsigned int intsize, - unsigned long *out_hwirq, unsigned int *out_type) +static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + if (hw < 32) { + irq_set_percpu_devid(irq); + irq_set_chip_and_handler(irq, &gic_chip, + handle_percpu_devid_irq); + set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); + } else { + irq_set_chip_and_handler(irq, &gic_chip, + handle_fasteoi_irq); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + } + irq_set_chip_data(irq, d->host_data); + return 0; +} + +static int gic_irq_domain_xlate(struct irq_domain *d, + struct device_node *controller, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, unsigned int *out_type) { if (d->of_node != controller) return -EINVAL; @@ -639,26 +634,23 @@ static int gic_irq_domain_dt_translate(struct irq_domain *d, *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; return 0; } -#endif const struct irq_domain_ops gic_irq_domain_ops = { -#ifdef CONFIG_OF - .dt_translate = gic_irq_domain_dt_translate, -#endif + .map = gic_irq_domain_map, + .xlate = gic_irq_domain_xlate, }; void __init gic_init_bases(unsigned int gic_nr, int irq_start, void __iomem *dist_base, void __iomem *cpu_base, - u32 percpu_offset) + u32 percpu_offset, struct device_node *node) { + irq_hw_number_t hwirq_base; struct gic_chip_data *gic; - struct irq_domain *domain; - int gic_irqs; + int gic_irqs, irq_base; BUG_ON(gic_nr >= MAX_GIC_NR); gic = &gic_data[gic_nr]; - domain = &gic->domain; #ifdef CONFIG_GIC_NON_BANKED if (percpu_offset) { /* Frankein-GIC without banked registers... */ unsigned int cpu; @@ -694,10 +686,10 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, * For primary GICs, skip over SGIs. * For secondary GICs, skip over PPIs, too. 
*/ - domain->hwirq_base = 32; + hwirq_base = 32; if (gic_nr == 0) { if ((irq_start & 31) > 0) { - domain->hwirq_base = 16; + hwirq_base = 16; if (irq_start != -1) irq_start = (irq_start & ~31) + 16; } @@ -713,17 +705,17 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, gic_irqs = 1020; gic->gic_irqs = gic_irqs; - domain->nr_irq = gic_irqs - domain->hwirq_base; - domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq, - numa_node_id()); - if (IS_ERR_VALUE(domain->irq_base)) { + gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ + irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id()); + if (IS_ERR_VALUE(irq_base)) { WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", irq_start); - domain->irq_base = irq_start; + irq_base = irq_start; } - domain->priv = gic; - domain->ops = &gic_irq_domain_ops; - irq_domain_add(domain); + gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, + hwirq_base, &gic_irq_domain_ops, gic); + if (WARN_ON(!gic->domain)) + return; gic_chip.flags |= gic_arch_extn.flags; gic_dist_init(gic); @@ -768,7 +760,6 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent) void __iomem *dist_base; u32 percpu_offset; int irq; - struct irq_domain *domain = &gic_data[gic_cnt].domain; if (WARN_ON(!node)) return -ENODEV; @@ -782,9 +773,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent) if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) percpu_offset = 0; - domain->of_node = of_node_get(node); - - gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset); + gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node); if (parent) { irq = irq_of_parse_and_map(node, 0); diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c index dcb004a804c7..7a66311f3066 100644 --- a/arch/arm/common/vic.c +++ b/arch/arm/common/vic.c @@ -56,7 +56,7 @@ struct vic_device { u32 int_enable; u32 soft_int; u32 protect; - struct irq_domain domain; + struct irq_domain *domain; }; /* we cannot allocate memory when VICs are initially registered */ @@ -192,14 +192,8 @@ static void __init vic_register(void __iomem *base, unsigned int irq, v->resume_sources = resume_sources; v->irq = irq; vic_id++; - - v->domain.irq_base = irq; - v->domain.nr_irq = 32; -#ifdef CONFIG_OF_IRQ - v->domain.of_node = of_node_get(node); -#endif /* CONFIG_OF */ - v->domain.ops = &irq_domain_simple_ops; - irq_domain_add(&v->domain); + v->domain = irq_domain_add_legacy(node, 32, irq, 0, + &irq_domain_simple_ops, v); } static void vic_ack_irq(struct irq_data *d) @@ -348,7 +342,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start, vic_register(base, irq_start, 0, node); } -static void __init __vic_init(void __iomem *base, unsigned int irq_start, +void __init __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node) { @@ -444,7 +438,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) stat = readl_relaxed(vic->base + VIC_IRQ_STATUS); while (stat) { irq = ffs(stat) - 1; - handle_IRQ(irq_domain_to_irq(&vic->domain, irq), regs); + handle_IRQ(irq_find_mapping(vic->domain, irq), regs); stat &= ~(1 << irq); handled = 1; } diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h index 4bdfe0018696..4b1ce6cd477f 100644 --- a/arch/arm/include/asm/hardware/gic.h +++ b/arch/arm/include/asm/hardware/gic.h @@ -39,7 +39,7 @@ struct device_node; extern struct 
irq_chip gic_arch_extn; void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, - u32 offset); + u32 offset, struct device_node *); int gic_of_init(struct device_node *node, struct device_node *parent); void gic_secondary_init(unsigned int); void gic_handle_irq(struct pt_regs *regs); @@ -49,7 +49,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); static inline void gic_init(unsigned int nr, int start, void __iomem *dist , void __iomem *cpu) { - gic_init_bases(nr, start, dist, cpu, 0); + gic_init_bases(nr, start, dist, cpu, 0, NULL); } #endif diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h index f42ebd619590..e14af1a1a320 100644 --- a/arch/arm/include/asm/hardware/vic.h +++ b/arch/arm/include/asm/hardware/vic.h @@ -47,6 +47,8 @@ struct device_node; struct pt_regs; +void __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, + u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); int vic_of_init(struct device_node *node, struct device_node *parent); void vic_handle_irq(struct pt_regs *regs); diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 5d3ed7e38561..314d4664eae7 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -198,7 +198,15 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { pgtable_page_dtor(pte); - tlb_add_flush(tlb, addr); + + /* + * With the classic ARM MMU, a pte page has two corresponding pmd + * entries, each covering 1MB. + */ + addr &= PMD_MASK; + tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); + tlb_add_flush(tlb, addr + SZ_1M); + tlb_remove_page(tlb, pte); } diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 3a456c6c7005..be16a48007b4 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -790,7 +790,7 @@ __kuser_cmpxchg64: @ 0xffff0f60 smp_dmb arm rsbs r0, r3, #0 @ set returned val and C flag ldmfd sp!, {r4, r5, r6, r7} - bx lr + usr_ret lr #elif !defined(CONFIG_SMP) diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 460bbbb6b885..6933244c68f9 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -469,6 +469,20 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, }; /* @@ -579,6 +593,20 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, }; /* diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index e1d5e1929fbd..e33870ff0ac0 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -699,10 +699,13 @@ static 
int vfp_set(struct task_struct *target, { int ret; struct thread_info *thread = task_thread_info(target); - struct vfp_hard_struct new_vfp = thread->vfpstate.hard; + struct vfp_hard_struct new_vfp; const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); + vfp_sync_hwstate(thread); + new_vfp = thread->vfpstate.hard; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpregs, user_fpregs_offset, @@ -723,9 +726,8 @@ static int vfp_set(struct task_struct *target, if (ret) return ret; - vfp_sync_hwstate(thread); - thread->vfpstate.hard = new_vfp; vfp_flush_hwstate(thread); + thread->vfpstate.hard = new_vfp; return 0; } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 0340224cf73c..9e617bd4a146 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -227,6 +227,8 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame) if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) return -EINVAL; + vfp_flush_hwstate(thread); + /* * Copy the floating point registers. There can be unused * registers see asm/hwcap.h for details. @@ -251,9 +253,6 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame) __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); - if (!err) - vfp_flush_hwstate(thread); - return err ? -EFAULT : 0; } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 99a572702509..f84dfe67724f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -266,6 +266,7 @@ void die(const char *str, struct pt_regs *regs, int err) { struct thread_info *thread = current_thread_info(); int ret; + enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; oops_enter(); @@ -273,7 +274,9 @@ void die(const char *str, struct pt_regs *regs, int err) console_verbose(); bust_spinlocks(1); if (!user_mode(regs)) - report_bug(regs->ARM_pc, regs); + bug_type = report_bug(regs->ARM_pc, regs); + if (bug_type != BUG_TRAP_TYPE_NONE) + str = "Oops - BUG"; ret = __die(str, err, thread, regs); if (regs && kexec_should_crash(thread->task)) diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 1e19691e0406..43a31fb06318 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -10,6 +10,7 @@ #include <asm/page.h> #define PROC_INFO \ + . = ALIGN(4); \ VMLINUX_SYMBOL(__proc_info_begin) = .; \ *(.proc.info.init) \ VMLINUX_SYMBOL(__proc_info_end) = .; diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c index 9e5e7552498c..45c97b1ee9b1 100644 --- a/arch/arm/mach-bcmring/arch.c +++ b/arch/arm/mach-bcmring/arch.c @@ -194,6 +194,6 @@ MACHINE_START(BCMRING, "BCMRING") .init_early = bcmring_init_early, .init_irq = bcmring_init_irq, .timer = &bcmring_timer, - .init_machine = bcmring_init_machine + .init_machine = bcmring_init_machine, .restart = bcmring_restart, MACHINE_END diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c index 1a1a27dd5654..1024396797e1 100644 --- a/arch/arm/mach-bcmring/dma.c +++ b/arch/arm/mach-bcmring/dma.c @@ -33,17 +33,11 @@ #include <mach/timer.h> -#include <linux/mm.h> #include <linux/pfn.h> #include <linux/atomic.h> #include <linux/sched.h> #include <mach/dma.h> -/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */ -/* especially since dc4 doesn't use kmalloc'd memory. 
*/ - -#define ALLOW_MAP_OF_KMALLOC_MEMORY 0 - /* ---- Public Variables ------------------------------------------------- */ /* ---- Private Constants and Types -------------------------------------- */ @@ -53,24 +47,12 @@ #define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f) #define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f) -#define DMA_MAP_DEBUG 0 - -#if DMA_MAP_DEBUG -# define DMA_MAP_PRINT(fmt, args...) printk("%s: " fmt, __func__, ## args) -#else -# define DMA_MAP_PRINT(fmt, args...) -#endif /* ---- Private Variables ------------------------------------------------ */ static DMA_Global_t gDMA; static struct proc_dir_entry *gDmaDir; -static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0); -static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0); -static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0); -static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0); - #include "dma_device.c" /* ---- Private Function Prototypes -------------------------------------- */ @@ -79,34 +61,6 @@ static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0); /****************************************************************************/ /** -* Displays information for /proc/dma/mem-type -*/ -/****************************************************************************/ - -static int dma_proc_read_mem_type(char *buf, char **start, off_t offset, - int count, int *eof, void *data) -{ - int len = 0; - - len += sprintf(buf + len, "dma_map_mem statistics\n"); - len += - sprintf(buf + len, "coherent: %d\n", - atomic_read(&gDmaStatMemTypeCoherent)); - len += - sprintf(buf + len, "kmalloc: %d\n", - atomic_read(&gDmaStatMemTypeKmalloc)); - len += - sprintf(buf + len, "vmalloc: %d\n", - atomic_read(&gDmaStatMemTypeVmalloc)); - len += - sprintf(buf + len, "user: %d\n", - atomic_read(&gDmaStatMemTypeUser)); - - return len; -} - -/****************************************************************************/ -/** * Displays information for /proc/dma/channels */ /****************************************************************************/ @@ -846,8 +800,6 @@ int dma_init(void) dma_proc_read_channels, NULL); create_proc_read_entry("devices", 0, gDmaDir, dma_proc_read_devices, NULL); - create_proc_read_entry("mem-type", 0, gDmaDir, - dma_proc_read_mem_type, NULL); } out: @@ -1565,767 +1517,3 @@ int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for. } EXPORT_SYMBOL(dma_set_device_handler); - -/****************************************************************************/ -/** -* Initializes a memory mapping structure -*/ -/****************************************************************************/ - -int dma_init_mem_map(DMA_MemMap_t *memMap) -{ - memset(memMap, 0, sizeof(*memMap)); - - sema_init(&memMap->lock, 1); - - return 0; -} - -EXPORT_SYMBOL(dma_init_mem_map); - -/****************************************************************************/ -/** -* Releases any memory currently being held by a memory mapping structure. -*/ -/****************************************************************************/ - -int dma_term_mem_map(DMA_MemMap_t *memMap) -{ - down(&memMap->lock); /* Just being paranoid */ - - /* Free up any allocated memory */ - - up(&memMap->lock); - memset(memMap, 0, sizeof(*memMap)); - - return 0; -} - -EXPORT_SYMBOL(dma_term_mem_map); - -/****************************************************************************/ -/** -* Looks at a memory address and categorizes it. -* -* @return One of the values from the DMA_MemType_t enumeration. 
-*/ -/****************************************************************************/ - -DMA_MemType_t dma_mem_type(void *addr) -{ - unsigned long addrVal = (unsigned long)addr; - - if (addrVal >= CONSISTENT_BASE) { - /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */ - - /* dma_alloc_xxx pages are physically and virtually contiguous */ - - return DMA_MEM_TYPE_DMA; - } - - /* Technically, we could add one more classification. Addresses between VMALLOC_END */ - /* and the beginning of the DMA virtual address could be considered to be I/O space. */ - /* Right now, nobody cares about this particular classification, so we ignore it. */ - - if (is_vmalloc_addr(addr)) { - /* Address comes from the vmalloc'd region. Pages are virtually */ - /* contiguous but NOT physically contiguous */ - - return DMA_MEM_TYPE_VMALLOC; - } - - if (addrVal >= PAGE_OFFSET) { - /* PAGE_OFFSET is typically 0xC0000000 */ - - /* kmalloc'd pages are physically contiguous */ - - return DMA_MEM_TYPE_KMALLOC; - } - - return DMA_MEM_TYPE_USER; -} - -EXPORT_SYMBOL(dma_mem_type); - -/****************************************************************************/ -/** -* Looks at a memory address and determines if we support DMA'ing to/from -* that type of memory. -* -* @return boolean - -* return value != 0 means dma supported -* return value == 0 means dma not supported -*/ -/****************************************************************************/ - -int dma_mem_supports_dma(void *addr) -{ - DMA_MemType_t memType = dma_mem_type(addr); - - return (memType == DMA_MEM_TYPE_DMA) -#if ALLOW_MAP_OF_KMALLOC_MEMORY - || (memType == DMA_MEM_TYPE_KMALLOC) -#endif - || (memType == DMA_MEM_TYPE_USER); -} - -EXPORT_SYMBOL(dma_mem_supports_dma); - -/****************************************************************************/ -/** -* Maps in a memory region such that it can be used for performing a DMA. -* -* @return -*/ -/****************************************************************************/ - -int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */ - enum dma_data_direction dir /* Direction that the mapping will be going */ - ) { - int rc; - - down(&memMap->lock); - - DMA_MAP_PRINT("memMap: %p\n", memMap); - - if (memMap->inUse) { - printk(KERN_ERR "%s: memory map %p is already being used\n", - __func__, memMap); - rc = -EBUSY; - goto out; - } - - memMap->inUse = 1; - memMap->dir = dir; - memMap->numRegionsUsed = 0; - - rc = 0; - -out: - - DMA_MAP_PRINT("returning %d", rc); - - up(&memMap->lock); - - return rc; -} - -EXPORT_SYMBOL(dma_map_start); - -/****************************************************************************/ -/** -* Adds a segment of memory to a memory map. Each segment is both -* physically and virtually contiguous. -* -* @return 0 on success, error code otherwise. 
-*/ -/****************************************************************************/ - -static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */ - DMA_Region_t *region, /* Region that the segment belongs to */ - void *virtAddr, /* Virtual address of the segment being added */ - dma_addr_t physAddr, /* Physical address of the segment being added */ - size_t numBytes /* Number of bytes of the segment being added */ - ) { - DMA_Segment_t *segment; - - DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr, - physAddr, numBytes); - - /* Sanity check */ - - if (((unsigned long)virtAddr < (unsigned long)region->virtAddr) - || (((unsigned long)virtAddr + numBytes)) > - ((unsigned long)region->virtAddr + region->numBytes)) { - printk(KERN_ERR - "%s: virtAddr %p is outside region @ %p len: %d\n", - __func__, virtAddr, region->virtAddr, region->numBytes); - return -EINVAL; - } - - if (region->numSegmentsUsed > 0) { - /* Check to see if this segment is physically contiguous with the previous one */ - - segment = ®ion->segment[region->numSegmentsUsed - 1]; - - if ((segment->physAddr + segment->numBytes) == physAddr) { - /* It is - just add on to the end */ - - DMA_MAP_PRINT("appending %d bytes to last segment\n", - numBytes); - - segment->numBytes += numBytes; - - return 0; - } - } - - /* Reallocate to hold more segments, if required. */ - - if (region->numSegmentsUsed >= region->numSegmentsAllocated) { - DMA_Segment_t *newSegment; - size_t oldSize = - region->numSegmentsAllocated * sizeof(*newSegment); - int newAlloc = region->numSegmentsAllocated + 4; - size_t newSize = newAlloc * sizeof(*newSegment); - - newSegment = kmalloc(newSize, GFP_KERNEL); - if (newSegment == NULL) { - return -ENOMEM; - } - memcpy(newSegment, region->segment, oldSize); - memset(&((uint8_t *) newSegment)[oldSize], 0, - newSize - oldSize); - kfree(region->segment); - - region->numSegmentsAllocated = newAlloc; - region->segment = newSegment; - } - - segment = ®ion->segment[region->numSegmentsUsed]; - region->numSegmentsUsed++; - - segment->virtAddr = virtAddr; - segment->physAddr = physAddr; - segment->numBytes = numBytes; - - DMA_MAP_PRINT("returning success\n"); - - return 0; -} - -/****************************************************************************/ -/** -* Adds a region of memory to a memory map. Each region is virtually -* contiguous, but not necessarily physically contiguous. -* -* @return 0 on success, error code otherwise. -*/ -/****************************************************************************/ - -int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */ - void *mem, /* Virtual address that we want to get a map of */ - size_t numBytes /* Number of bytes being mapped */ - ) { - unsigned long addr = (unsigned long)mem; - unsigned int offset; - int rc = 0; - DMA_Region_t *region; - dma_addr_t physAddr; - - down(&memMap->lock); - - DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes); - - if (!memMap->inUse) { - printk(KERN_ERR "%s: Make sure you call dma_map_start first\n", - __func__); - rc = -EINVAL; - goto out; - } - - /* Reallocate to hold more regions. 
*/ - - if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) { - DMA_Region_t *newRegion; - size_t oldSize = - memMap->numRegionsAllocated * sizeof(*newRegion); - int newAlloc = memMap->numRegionsAllocated + 4; - size_t newSize = newAlloc * sizeof(*newRegion); - - newRegion = kmalloc(newSize, GFP_KERNEL); - if (newRegion == NULL) { - rc = -ENOMEM; - goto out; - } - memcpy(newRegion, memMap->region, oldSize); - memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize); - - kfree(memMap->region); - - memMap->numRegionsAllocated = newAlloc; - memMap->region = newRegion; - } - - region = &memMap->region[memMap->numRegionsUsed]; - memMap->numRegionsUsed++; - - offset = addr & ~PAGE_MASK; - - region->memType = dma_mem_type(mem); - region->virtAddr = mem; - region->numBytes = numBytes; - region->numSegmentsUsed = 0; - region->numLockedPages = 0; - region->lockedPages = NULL; - - switch (region->memType) { - case DMA_MEM_TYPE_VMALLOC: - { - atomic_inc(&gDmaStatMemTypeVmalloc); - - /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */ - - /* vmalloc'd pages are not physically contiguous */ - - rc = -EINVAL; - break; - } - - case DMA_MEM_TYPE_KMALLOC: - { - atomic_inc(&gDmaStatMemTypeKmalloc); - - /* kmalloc'd pages are physically contiguous, so they'll have exactly */ - /* one segment */ - -#if ALLOW_MAP_OF_KMALLOC_MEMORY - physAddr = - dma_map_single(NULL, mem, numBytes, memMap->dir); - rc = dma_map_add_segment(memMap, region, mem, physAddr, - numBytes); -#else - rc = -EINVAL; -#endif - break; - } - - case DMA_MEM_TYPE_DMA: - { - /* dma_alloc_xxx pages are physically contiguous */ - - atomic_inc(&gDmaStatMemTypeCoherent); - - physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset; - - dma_sync_single_for_cpu(NULL, physAddr, numBytes, - memMap->dir); - rc = dma_map_add_segment(memMap, region, mem, physAddr, - numBytes); - break; - } - - case DMA_MEM_TYPE_USER: - { - size_t firstPageOffset; - size_t firstPageSize; - struct page **pages; - struct task_struct *userTask; - - atomic_inc(&gDmaStatMemTypeUser); - -#if 1 - /* If the pages are user pages, then the dma_mem_map_set_user_task function */ - /* must have been previously called. */ - - if (memMap->userTask == NULL) { - printk(KERN_ERR - "%s: must call dma_mem_map_set_user_task when using user-mode memory\n", - __func__); - return -EINVAL; - } - - /* User pages need to be locked. */ - - firstPageOffset = - (unsigned long)region->virtAddr & (PAGE_SIZE - 1); - firstPageSize = PAGE_SIZE - firstPageOffset; - - region->numLockedPages = (firstPageOffset - + region->numBytes + - PAGE_SIZE - 1) / PAGE_SIZE; - pages = - kmalloc(region->numLockedPages * - sizeof(struct page *), GFP_KERNEL); - - if (pages == NULL) { - region->numLockedPages = 0; - return -ENOMEM; - } - - userTask = memMap->userTask; - - down_read(&userTask->mm->mmap_sem); - rc = get_user_pages(userTask, /* task */ - userTask->mm, /* mm */ - (unsigned long)region->virtAddr, /* start */ - region->numLockedPages, /* len */ - memMap->dir == DMA_FROM_DEVICE, /* write */ - 0, /* force */ - pages, /* pages (array of pointers to page) */ - NULL); /* vmas */ - up_read(&userTask->mm->mmap_sem); - - if (rc != region->numLockedPages) { - kfree(pages); - region->numLockedPages = 0; - - if (rc >= 0) { - rc = -EINVAL; - } - } else { - uint8_t *virtAddr = region->virtAddr; - size_t bytesRemaining; - int pageIdx; - - rc = 0; /* Since get_user_pages returns +ve number */ - - region->lockedPages = pages; - - /* We've locked the user pages. 
Now we need to walk them and figure */ - /* out the physical addresses. */ - - /* The first page may be partial */ - - dma_map_add_segment(memMap, - region, - virtAddr, - PFN_PHYS(page_to_pfn - (pages[0])) + - firstPageOffset, - firstPageSize); - - virtAddr += firstPageSize; - bytesRemaining = - region->numBytes - firstPageSize; - - for (pageIdx = 1; - pageIdx < region->numLockedPages; - pageIdx++) { - size_t bytesThisPage = - (bytesRemaining > - PAGE_SIZE ? PAGE_SIZE : - bytesRemaining); - - DMA_MAP_PRINT - ("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n", - pageIdx, pages[pageIdx], - page_to_pfn(pages[pageIdx]), - PFN_PHYS(page_to_pfn - (pages[pageIdx]))); - - dma_map_add_segment(memMap, - region, - virtAddr, - PFN_PHYS(page_to_pfn - (pages - [pageIdx])), - bytesThisPage); - - virtAddr += bytesThisPage; - bytesRemaining -= bytesThisPage; - } - } -#else - printk(KERN_ERR - "%s: User mode pages are not yet supported\n", - __func__); - - /* user pages are not physically contiguous */ - - rc = -EINVAL; -#endif - break; - } - - default: - { - printk(KERN_ERR "%s: Unsupported memory type: %d\n", - __func__, region->memType); - - rc = -EINVAL; - break; - } - } - - if (rc != 0) { - memMap->numRegionsUsed--; - } - -out: - - DMA_MAP_PRINT("returning %d\n", rc); - - up(&memMap->lock); - - return rc; -} - -EXPORT_SYMBOL(dma_map_add_segment); - -/****************************************************************************/ -/** -* Maps in a memory region such that it can be used for performing a DMA. -* -* @return 0 on success, error code otherwise. -*/ -/****************************************************************************/ - -int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */ - void *mem, /* Virtual address that we want to get a map of */ - size_t numBytes, /* Number of bytes being mapped */ - enum dma_data_direction dir /* Direction that the mapping will be going */ - ) { - int rc; - - rc = dma_map_start(memMap, dir); - if (rc == 0) { - rc = dma_map_add_region(memMap, mem, numBytes); - if (rc < 0) { - /* Since the add fails, this function will fail, and the caller won't */ - /* call unmap, so we need to do it here. */ - - dma_unmap(memMap, 0); - } - } - - return rc; -} - -EXPORT_SYMBOL(dma_map_mem); - -/****************************************************************************/ -/** -* Setup a descriptor ring for a given memory map. -* -* It is assumed that the descriptor ring has already been initialized, and -* this routine will only reallocate a new descriptor ring if the existing -* one is too small. -* -* @return 0 on success, error code otherwise. 
-*/ -/****************************************************************************/ - -int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */ - DMA_MemMap_t *memMap, /* Memory map that will be used */ - dma_addr_t devPhysAddr /* Physical address of device */ - ) { - int rc; - int numDescriptors; - DMA_DeviceAttribute_t *devAttr; - DMA_Region_t *region; - DMA_Segment_t *segment; - dma_addr_t srcPhysAddr; - dma_addr_t dstPhysAddr; - int regionIdx; - int segmentIdx; - - devAttr = &DMA_gDeviceAttribute[dev]; - - down(&memMap->lock); - - /* Figure out how many descriptors we need */ - - numDescriptors = 0; - for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { - region = &memMap->region[regionIdx]; - - for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed; - segmentIdx++) { - segment = ®ion->segment[segmentIdx]; - - if (memMap->dir == DMA_TO_DEVICE) { - srcPhysAddr = segment->physAddr; - dstPhysAddr = devPhysAddr; - } else { - srcPhysAddr = devPhysAddr; - dstPhysAddr = segment->physAddr; - } - - rc = - dma_calculate_descriptor_count(dev, srcPhysAddr, - dstPhysAddr, - segment-> - numBytes); - if (rc < 0) { - printk(KERN_ERR - "%s: dma_calculate_descriptor_count failed: %d\n", - __func__, rc); - goto out; - } - numDescriptors += rc; - } - } - - /* Adjust the size of the ring, if it isn't big enough */ - - if (numDescriptors > devAttr->ring.descriptorsAllocated) { - dma_free_descriptor_ring(&devAttr->ring); - rc = - dma_alloc_descriptor_ring(&devAttr->ring, - numDescriptors); - if (rc < 0) { - printk(KERN_ERR - "%s: dma_alloc_descriptor_ring failed: %d\n", - __func__, rc); - goto out; - } - } else { - rc = - dma_init_descriptor_ring(&devAttr->ring, - numDescriptors); - if (rc < 0) { - printk(KERN_ERR - "%s: dma_init_descriptor_ring failed: %d\n", - __func__, rc); - goto out; - } - } - - /* Populate the descriptors */ - - for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { - region = &memMap->region[regionIdx]; - - for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed; - segmentIdx++) { - segment = ®ion->segment[segmentIdx]; - - if (memMap->dir == DMA_TO_DEVICE) { - srcPhysAddr = segment->physAddr; - dstPhysAddr = devPhysAddr; - } else { - srcPhysAddr = devPhysAddr; - dstPhysAddr = segment->physAddr; - } - - rc = - dma_add_descriptors(&devAttr->ring, dev, - srcPhysAddr, dstPhysAddr, - segment->numBytes); - if (rc < 0) { - printk(KERN_ERR - "%s: dma_add_descriptors failed: %d\n", - __func__, rc); - goto out; - } - } - } - - rc = 0; - -out: - - up(&memMap->lock); - return rc; -} - -EXPORT_SYMBOL(dma_map_create_descriptor_ring); - -/****************************************************************************/ -/** -* Maps in a memory region such that it can be used for performing a DMA. 
-* -* @return -*/ -/****************************************************************************/ - -int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ - int dirtied /* non-zero if any of the pages were modified */ - ) { - - int rc = 0; - int regionIdx; - int segmentIdx; - DMA_Region_t *region; - DMA_Segment_t *segment; - - down(&memMap->lock); - - for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) { - region = &memMap->region[regionIdx]; - - for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed; - segmentIdx++) { - segment = ®ion->segment[segmentIdx]; - - switch (region->memType) { - case DMA_MEM_TYPE_VMALLOC: - { - printk(KERN_ERR - "%s: vmalloc'd pages are not yet supported\n", - __func__); - rc = -EINVAL; - goto out; - } - - case DMA_MEM_TYPE_KMALLOC: - { -#if ALLOW_MAP_OF_KMALLOC_MEMORY - dma_unmap_single(NULL, - segment->physAddr, - segment->numBytes, - memMap->dir); -#endif - break; - } - - case DMA_MEM_TYPE_DMA: - { - dma_sync_single_for_cpu(NULL, - segment-> - physAddr, - segment-> - numBytes, - memMap->dir); - break; - } - - case DMA_MEM_TYPE_USER: - { - /* Nothing to do here. */ - - break; - } - - default: - { - printk(KERN_ERR - "%s: Unsupported memory type: %d\n", - __func__, region->memType); - rc = -EINVAL; - goto out; - } - } - - segment->virtAddr = NULL; - segment->physAddr = 0; - segment->numBytes = 0; - } - - if (region->numLockedPages > 0) { - int pageIdx; - - /* Some user pages were locked. We need to go and unlock them now. */ - - for (pageIdx = 0; pageIdx < region->numLockedPages; - pageIdx++) { - struct page *page = - region->lockedPages[pageIdx]; - - if (memMap->dir == DMA_FROM_DEVICE) { - SetPageDirty(page); - } - page_cache_release(page); - } - kfree(region->lockedPages); - region->numLockedPages = 0; - region->lockedPages = NULL; - } - - region->memType = DMA_MEM_TYPE_NONE; - region->virtAddr = NULL; - region->numBytes = 0; - region->numSegmentsUsed = 0; - } - memMap->userTask = NULL; - memMap->numRegionsUsed = 0; - memMap->inUse = 0; - -out: - up(&memMap->lock); - - return rc; -} - -EXPORT_SYMBOL(dma_unmap); diff --git a/arch/arm/mach-bcmring/include/mach/dma.h b/arch/arm/mach-bcmring/include/mach/dma.h index 1f2c5319c056..72543781207b 100644 --- a/arch/arm/mach-bcmring/include/mach/dma.h +++ b/arch/arm/mach-bcmring/include/mach/dma.h @@ -26,15 +26,9 @@ /* ---- Include Files ---------------------------------------------------- */ #include <linux/kernel.h> -#include <linux/wait.h> #include <linux/semaphore.h> #include <csp/dmacHw.h> #include <mach/timer.h> -#include <linux/scatterlist.h> -#include <linux/dma-mapping.h> -#include <linux/mm.h> -#include <linux/vmalloc.h> -#include <linux/pagemap.h> /* ---- Constants and Types ---------------------------------------------- */ @@ -113,78 +107,6 @@ typedef struct { /**************************************************************************** * -* The DMA_MemType_t and DMA_MemMap_t are helper structures used to setup -* DMA chains from a variety of memory sources. -* -*****************************************************************************/ - -#define DMA_MEM_MAP_MIN_SIZE 4096 /* Pages less than this size are better */ - /* off not being DMA'd. */ - -typedef enum { - DMA_MEM_TYPE_NONE, /* Not a valid setting */ - DMA_MEM_TYPE_VMALLOC, /* Memory came from vmalloc call */ - DMA_MEM_TYPE_KMALLOC, /* Memory came from kmalloc call */ - DMA_MEM_TYPE_DMA, /* Memory came from dma_alloc_xxx call */ - DMA_MEM_TYPE_USER, /* Memory came from user space. 
*/ - -} DMA_MemType_t; - -/* A segment represents a physically and virtually contiguous chunk of memory. */ -/* i.e. each segment can be DMA'd */ -/* A user of the DMA code will add memory regions. Each region may need to be */ -/* represented by one or more segments. */ - -typedef struct { - void *virtAddr; /* Virtual address used for this segment */ - dma_addr_t physAddr; /* Physical address this segment maps to */ - size_t numBytes; /* Size of the segment, in bytes */ - -} DMA_Segment_t; - -/* A region represents a virtually contiguous chunk of memory, which may be */ -/* made up of multiple segments. */ - -typedef struct { - DMA_MemType_t memType; - void *virtAddr; - size_t numBytes; - - /* Each region (virtually contiguous) consists of one or more segments. Each */ - /* segment is virtually and physically contiguous. */ - - int numSegmentsUsed; - int numSegmentsAllocated; - DMA_Segment_t *segment; - - /* When a region corresponds to user memory, we need to lock all of the pages */ - /* down before we can figure out the physical addresses. The lockedPage array contains */ - /* the pages that were locked, and which subsequently need to be unlocked once the */ - /* memory is unmapped. */ - - unsigned numLockedPages; - struct page **lockedPages; - -} DMA_Region_t; - -typedef struct { - int inUse; /* Is this mapping currently being used? */ - struct semaphore lock; /* Acquired when using this structure */ - enum dma_data_direction dir; /* Direction this transfer is intended for */ - - /* In the event that we're mapping user memory, we need to know which task */ - /* the memory is for, so that we can obtain the correct mm locks. */ - - struct task_struct *userTask; - - int numRegionsUsed; - int numRegionsAllocated; - DMA_Region_t *region; - -} DMA_MemMap_t; - -/**************************************************************************** -* * The DMA_DeviceAttribute_t contains information which describes a * particular DMA device (or peripheral). * @@ -570,124 +492,6 @@ int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */ /****************************************************************************/ /** -* Initializes a DMA_MemMap_t data structure -*/ -/****************************************************************************/ - -int dma_init_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */ - ); - -/****************************************************************************/ -/** -* Releases any memory currently being held by a memory mapping structure. -*/ -/****************************************************************************/ - -int dma_term_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */ - ); - -/****************************************************************************/ -/** -* Looks at a memory address and categorizes it. -* -* @return One of the values from the DMA_MemType_t enumeration. -*/ -/****************************************************************************/ - -DMA_MemType_t dma_mem_type(void *addr); - -/****************************************************************************/ -/** -* Sets the process (aka userTask) associated with a mem map. This is -* required if user-mode segments will be added to the mapping. 
-*/ -/****************************************************************************/ - -static inline void dma_mem_map_set_user_task(DMA_MemMap_t *memMap, - struct task_struct *task) -{ - memMap->userTask = task; -} - -/****************************************************************************/ -/** -* Looks at a memory address and determines if we support DMA'ing to/from -* that type of memory. -* -* @return boolean - -* return value != 0 means dma supported -* return value == 0 means dma not supported -*/ -/****************************************************************************/ - -int dma_mem_supports_dma(void *addr); - -/****************************************************************************/ -/** -* Initializes a memory map for use. Since this function acquires a -* sempaphore within the memory map, it is VERY important that dma_unmap -* be called when you're finished using the map. -*/ -/****************************************************************************/ - -int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */ - enum dma_data_direction dir /* Direction that the mapping will be going */ - ); - -/****************************************************************************/ -/** -* Adds a segment of memory to a memory map. -* -* @return 0 on success, error code otherwise. -*/ -/****************************************************************************/ - -int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */ - void *mem, /* Virtual address that we want to get a map of */ - size_t numBytes /* Number of bytes being mapped */ - ); - -/****************************************************************************/ -/** -* Creates a descriptor ring from a memory mapping. -* -* @return 0 on success, error code otherwise. -*/ -/****************************************************************************/ - -int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */ - DMA_MemMap_t *memMap, /* Memory map that will be used */ - dma_addr_t devPhysAddr /* Physical address of device */ - ); - -/****************************************************************************/ -/** -* Maps in a memory region such that it can be used for performing a DMA. -* -* @return -*/ -/****************************************************************************/ - -int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */ - void *addr, /* Virtual address that we want to get a map of */ - size_t count, /* Number of bytes being mapped */ - enum dma_data_direction dir /* Direction that the mapping will be going */ - ); - -/****************************************************************************/ -/** -* Maps in a memory region such that it can be used for performing a DMA. -* -* @return -*/ -/****************************************************************************/ - -int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */ - int dirtied /* non-zero if any of the pages were modified */ - ); - -/****************************************************************************/ -/** * Initiates a transfer when the descriptors have already been setup. 
* * This is a special case, and normally, the dma_transfer_xxx functions should diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 6b22b543a83f..d5088900af6c 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -44,7 +44,7 @@ #include <mach/aemif.h> #include <mach/spi.h> -#define DA850_EVM_PHY_ID "0:00" +#define DA850_EVM_PHY_ID "davinci_mdio-0:00" #define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8) #define DA850_LCD_BL_PIN GPIO_TO_PIN(2, 15) diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index 346e1de2f5a8..849311d3cb7c 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -54,7 +54,7 @@ static inline int have_tvp7002(void) return 0; } -#define DM365_EVM_PHY_ID "0:01" +#define DM365_EVM_PHY_ID "davinci_mdio-0:01" /* * A MAX-II CPLD is used for various board control functions. */ diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index a64b49cfedca..1247ecdcf752 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -40,7 +40,7 @@ #include <mach/usb.h> #include <mach/aemif.h> -#define DM644X_EVM_PHY_ID "0:01" +#define DM644X_EVM_PHY_ID "davinci_mdio-0:01" #define LXT971_PHY_ID (0x001378e2) #define LXT971_PHY_MASK (0xfffffff0) diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index 64017558860b..872ac69fa049 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -736,7 +736,7 @@ static struct davinci_uart_config uart_config __initdata = { .enabled_uarts = (1 << 0), }; -#define DM646X_EVM_PHY_ID "0:01" +#define DM646X_EVM_PHY_ID "davinci_mdio-0:01" /* * The following EDMA channels/slots are not being used by drivers (for * example: Timer, GPIO, UART events etc) on dm646x, hence they are being diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 6c4a16415d47..8d34f513d415 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c @@ -39,7 +39,7 @@ #include <mach/mmc.h> #include <mach/usb.h> -#define NEUROS_OSD2_PHY_ID "0:01" +#define NEUROS_OSD2_PHY_ID "davinci_mdio-0:01" #define LXT971_PHY_ID 0x001378e2 #define LXT971_PHY_MASK 0xfffffff0 diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index e7c0c7c53493..45e815760a27 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -21,7 +21,7 @@ #include <mach/da8xx.h> #include <mach/mux.h> -#define HAWKBOARD_PHY_ID "0:07" +#define HAWKBOARD_PHY_ID "davinci_mdio-0:07" #define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12) #define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13) diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c index 0b136a831c59..31da3c5b2ba3 100644 --- a/arch/arm/mach-davinci/board-sffsdr.c +++ b/arch/arm/mach-davinci/board-sffsdr.c @@ -42,7 +42,7 @@ #include <mach/mux.h> #include <mach/usb.h> -#define SFFSDR_PHY_ID "0:01" +#define SFFSDR_PHY_ID "davinci_mdio-0:01" static struct mtd_partition davinci_sffsdr_nandflash_partition[] = { /* U-Boot Environment: Block 0 * UBL: Block 1 diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c index 0ed7fdb64efb..992c4c410185 100644 --- a/arch/arm/mach-davinci/da850.c +++ 
b/arch/arm/mach-davinci/da850.c @@ -153,34 +153,6 @@ static struct clk pll1_sysclk3 = { .div_reg = PLLDIV3, }; -static struct clk pll1_sysclk4 = { - .name = "pll1_sysclk4", - .parent = &pll1_clk, - .flags = CLK_PLL, - .div_reg = PLLDIV4, -}; - -static struct clk pll1_sysclk5 = { - .name = "pll1_sysclk5", - .parent = &pll1_clk, - .flags = CLK_PLL, - .div_reg = PLLDIV5, -}; - -static struct clk pll1_sysclk6 = { - .name = "pll0_sysclk6", - .parent = &pll0_clk, - .flags = CLK_PLL, - .div_reg = PLLDIV6, -}; - -static struct clk pll1_sysclk7 = { - .name = "pll1_sysclk7", - .parent = &pll1_clk, - .flags = CLK_PLL, - .div_reg = PLLDIV7, -}; - static struct clk i2c0_clk = { .name = "i2c0", .parent = &pll0_aux_clk, @@ -397,10 +369,6 @@ static struct clk_lookup da850_clks[] = { CLK(NULL, "pll1_aux", &pll1_aux_clk), CLK(NULL, "pll1_sysclk2", &pll1_sysclk2), CLK(NULL, "pll1_sysclk3", &pll1_sysclk3), - CLK(NULL, "pll1_sysclk4", &pll1_sysclk4), - CLK(NULL, "pll1_sysclk5", &pll1_sysclk5), - CLK(NULL, "pll1_sysclk6", &pll1_sysclk6), - CLK(NULL, "pll1_sysclk7", &pll1_sysclk7), CLK("i2c_davinci.1", NULL, &i2c0_clk), CLK(NULL, "timer0", &timerp64_0_clk), CLK("watchdog", NULL, &timerp64_1_clk), diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index c59e18871006..6de298c5d2d3 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c @@ -402,7 +402,7 @@ void __init exynos4_init_irq(void) gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000; if (!of_have_populated_dt()) - gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset); + gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL); #ifdef CONFIG_OF else of_irq_init(exynos4_dt_irq_match); diff --git a/arch/arm/mach-imx/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c index e6bad17b908c..1e03ef42faa0 100644 --- a/arch/arm/mach-imx/imx51-dt.c +++ b/arch/arm/mach-imx/imx51-dt.c @@ -47,7 +47,7 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = { static int __init imx51_tzic_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { - irq_domain_add_simple(np, 0); + irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL); return 0; } @@ -57,7 +57,7 @@ static int __init imx51_gpio_add_irq_domain(struct device_node *np, static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; gpio_irq_base -= 32; - irq_domain_add_simple(np, gpio_irq_base); + irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL); return 0; } diff --git a/arch/arm/mach-imx/imx53-dt.c b/arch/arm/mach-imx/imx53-dt.c index 05ebb3e68679..fd5be0f20fbb 100644 --- a/arch/arm/mach-imx/imx53-dt.c +++ b/arch/arm/mach-imx/imx53-dt.c @@ -51,7 +51,7 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = { static int __init imx53_tzic_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { - irq_domain_add_simple(np, 0); + irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL); return 0; } @@ -61,7 +61,7 @@ static int __init imx53_gpio_add_irq_domain(struct device_node *np, static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; gpio_irq_base -= 32; - irq_domain_add_simple(np, gpio_irq_base); + irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL); return 0; } diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index c25728106917..6075d4d62dd6 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c @@ 
-97,7 +97,8 @@ static int __init imx6q_gpio_add_irq_domain(struct device_node *np, static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; gpio_irq_base -= 32; - irq_domain_add_simple(np, gpio_irq_base); + irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, + NULL); return 0; } diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c index 0a113424632c..962e71169750 100644 --- a/arch/arm/mach-msm/board-msm8x60.c +++ b/arch/arm/mach-msm/board-msm8x60.c @@ -80,12 +80,8 @@ static struct of_device_id msm_dt_gic_match[] __initdata = { static void __init msm8x60_dt_init(void) { - struct device_node *node; - - node = of_find_matching_node_by_address(NULL, msm_dt_gic_match, - MSM8X60_QGIC_DIST_PHYS); - if (node) - irq_domain_add_simple(node, GIC_SPI_START); + irq_domain_generate_simple(msm_dt_gic_match, MSM8X60_QGIC_DIST_PHYS, + GIC_SPI_START); if (of_machine_is_compatible("qcom,msm8660-surf")) { printk(KERN_INFO "Init surf UART registers\n"); diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 41e6612ecbaf..d965da45160e 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -213,13 +213,12 @@ config MACH_OMAP3_PANDORA depends on ARCH_OMAP3 default y select OMAP_PACKAGE_CBB - select REGULATOR_FIXED_VOLTAGE + select REGULATOR_FIXED_VOLTAGE if REGULATOR config MACH_OMAP3_TOUCHBOOK bool "OMAP3 Touch Book" depends on ARCH_OMAP3 default y - select BACKLIGHT_CLASS_DEVICE config MACH_OMAP_3430SDP bool "OMAP 3430 SDP board" @@ -265,7 +264,7 @@ config MACH_OMAP_ZOOM2 select SERIAL_8250 select SERIAL_CORE_CONSOLE select SERIAL_8250_CONSOLE - select REGULATOR_FIXED_VOLTAGE + select REGULATOR_FIXED_VOLTAGE if REGULATOR config MACH_OMAP_ZOOM3 bool "OMAP3630 Zoom3 board" @@ -275,7 +274,7 @@ config MACH_OMAP_ZOOM3 select SERIAL_8250 select SERIAL_CORE_CONSOLE select SERIAL_8250_CONSOLE - select REGULATOR_FIXED_VOLTAGE + select REGULATOR_FIXED_VOLTAGE if REGULATOR config MACH_CM_T35 bool "CompuLab CM-T35/CM-T3730 modules" @@ -334,7 +333,7 @@ config MACH_OMAP_4430SDP depends on ARCH_OMAP4 select OMAP_PACKAGE_CBL select OMAP_PACKAGE_CBS - select REGULATOR_FIXED_VOLTAGE + select REGULATOR_FIXED_VOLTAGE if REGULATOR config MACH_OMAP4_PANDA bool "OMAP4 Panda Board" @@ -342,7 +341,7 @@ config MACH_OMAP4_PANDA depends on ARCH_OMAP4 select OMAP_PACKAGE_CBL select OMAP_PACKAGE_CBS - select REGULATOR_FIXED_VOLTAGE + select REGULATOR_FIXED_VOLTAGE if REGULATOR config OMAP3_EMU bool "OMAP3 debugging peripherals" diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 39fba9df17fb..4e9071589bfb 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -52,8 +52,9 @@ #define ETH_KS8851_QUART 138 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 #define OMAP4_SFH7741_ENABLE_GPIO 188 -#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */ +#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */ #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */ +#define HDMI_GPIO_HPD 63 /* Hotplug detect */ #define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */ #define DLP_POWER_ON_GPIO 40 @@ -603,8 +604,9 @@ static void __init omap_sfh7741prox_init(void) } static struct gpio sdp4430_hdmi_gpios[] = { - { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, + { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" }, { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, + { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" }, }; static int 
sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) @@ -621,8 +623,7 @@ static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev) { - gpio_free(HDMI_GPIO_LS_OE); - gpio_free(HDMI_GPIO_HPD); + gpio_free_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios)); } static struct nokia_dsi_panel_data dsi1_panel = { @@ -738,6 +739,10 @@ static void sdp4430_lcd_init(void) pr_err("%s: Could not get lcd2_reset_gpio\n", __func__); } +static struct omap_dss_hdmi_data sdp4430_hdmi_data = { + .hpd_gpio = HDMI_GPIO_HPD, +}; + static struct omap_dss_device sdp4430_hdmi_device = { .name = "hdmi", .driver_name = "hdmi_panel", @@ -745,6 +750,7 @@ static struct omap_dss_device sdp4430_hdmi_device = { .platform_enable = sdp4430_panel_enable_hdmi, .platform_disable = sdp4430_panel_disable_hdmi, .channel = OMAP_DSS_CHANNEL_DIGIT, + .data = &sdp4430_hdmi_data, }; static struct picodlp_panel_data sdp4430_picodlp_pdata = { @@ -808,7 +814,7 @@ static struct omap_dss_board_info sdp4430_dss_data = { .default_device = &sdp4430_lcd_device, }; -static void omap_4430sdp_display_init(void) +static void __init omap_4430sdp_display_init(void) { int r; @@ -829,6 +835,10 @@ static void omap_4430sdp_display_init(void) omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP); else omap_hdmi_init(0); + + omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN); } #ifdef CONFIG_OMAP_MUX @@ -841,7 +851,7 @@ static struct omap_board_mux board_mux[] __initdata = { #define board_mux NULL #endif -static void omap4_sdp4430_wifi_mux_init(void) +static void __init omap4_sdp4430_wifi_mux_init(void) { omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT | OMAP_PIN_OFF_WAKEUPENABLE); @@ -868,12 +878,17 @@ static struct wl12xx_platform_data omap4_sdp4430_wlan_data __initdata = { .board_tcxo_clock = WL12XX_TCXOCLOCK_26, }; -static void omap4_sdp4430_wifi_init(void) +static void __init omap4_sdp4430_wifi_init(void) { + int ret; + omap4_sdp4430_wifi_mux_init(); - if (wl12xx_set_platform_data(&omap4_sdp4430_wlan_data)) - pr_err("Error setting wl12xx data\n"); - platform_device_register(&omap_vwlan_device); + ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data); + if (ret) + pr_err("Error setting wl12xx data: %d\n", ret); + ret = platform_device_register(&omap_vwlan_device); + if (ret) + pr_err("Error registering wl12xx device: %d\n", ret); } static void __init omap_4430sdp_init(void) diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index d58756060483..00b1d024fa87 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -67,7 +67,7 @@ static void __init omap_generic_init(void) { struct device_node *node = of_find_matching_node(NULL, intc_match); if (node) - irq_domain_add_simple(node, 0); + irq_domain_add_legacy(node, 32, 0, 0, &irq_domain_simple_ops, NULL); omap_sdrc_init(NULL, NULL); diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index 003fe34c9343..c775bead1497 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c @@ -617,6 +617,21 @@ static struct gpio omap3_evm_ehci_gpios[] __initdata = { { OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" }, }; +static void __init omap3_evm_wl12xx_init(void) +{ +#ifdef CONFIG_WL12XX_PLATFORM_DATA + int ret; + + /* WL12xx WLAN Init */ + ret = 
wl12xx_set_platform_data(&omap3evm_wlan_data); + if (ret) + pr_err("error setting wl12xx data: %d\n", ret); + ret = platform_device_register(&omap3evm_wlan_regulator); + if (ret) + pr_err("error registering wl12xx device: %d\n", ret); +#endif +} + static void __init omap3_evm_init(void) { omap3_evm_get_revision(); @@ -665,13 +680,7 @@ static void __init omap3_evm_init(void) omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL); omap3evm_init_smsc911x(); omap3_evm_display_init(); - -#ifdef CONFIG_WL12XX_PLATFORM_DATA - /* WL12xx WLAN Init */ - if (wl12xx_set_platform_data(&omap3evm_wlan_data)) - pr_err("error setting wl12xx data\n"); - platform_device_register(&omap3evm_wlan_regulator); -#endif + omap3_evm_wl12xx_init(); } MACHINE_START(OMAP3EVM, "OMAP3 EVM") diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index 30ad40db2cf3..28fc271f7031 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -51,8 +51,9 @@ #define GPIO_HUB_NRESET 62 #define GPIO_WIFI_PMENA 43 #define GPIO_WIFI_IRQ 53 -#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */ +#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */ #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */ +#define HDMI_GPIO_HPD 63 /* Hotplug detect */ /* wl127x BT, FM, GPS connectivity chip */ static int wl1271_gpios[] = {46, -1, -1}; @@ -413,8 +414,9 @@ int __init omap4_panda_dvi_init(void) } static struct gpio panda_hdmi_gpios[] = { - { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, + { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" }, { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, + { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" }, }; static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev) @@ -431,10 +433,13 @@ static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev) static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev) { - gpio_free(HDMI_GPIO_LS_OE); - gpio_free(HDMI_GPIO_HPD); + gpio_free_array(panda_hdmi_gpios, ARRAY_SIZE(panda_hdmi_gpios)); } +static struct omap_dss_hdmi_data omap4_panda_hdmi_data = { + .hpd_gpio = HDMI_GPIO_HPD, +}; + static struct omap_dss_device omap4_panda_hdmi_device = { .name = "hdmi", .driver_name = "hdmi_panel", @@ -442,6 +447,7 @@ static struct omap_dss_device omap4_panda_hdmi_device = { .platform_enable = omap4_panda_panel_enable_hdmi, .platform_disable = omap4_panda_panel_disable_hdmi, .channel = OMAP_DSS_CHANNEL_DIGIT, + .data = &omap4_panda_hdmi_data, }; static struct omap_dss_device *omap4_panda_dss_devices[] = { @@ -473,18 +479,24 @@ void omap4_panda_display_init(void) omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP); else omap_hdmi_init(0); + + omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN); } static void __init omap4_panda_init(void) { int package = OMAP_PACKAGE_CBS; + int ret; if (omap_rev() == OMAP4430_REV_ES1_0) package = OMAP_PACKAGE_CBL; omap4_mux_init(board_mux, NULL, package); - if (wl12xx_set_platform_data(&omap_panda_wlan_data)) - pr_err("error setting wl12xx data\n"); + ret = wl12xx_set_platform_data(&omap_panda_wlan_data); + if (ret) + pr_err("error setting wl12xx data: %d\n", ret); omap4_panda_i2c_init(); platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices)); diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c index 
8d7ce11cfeaf..c126461836ac 100644 --- a/arch/arm/mach-omap2/board-zoom-peripherals.c +++ b/arch/arm/mach-omap2/board-zoom-peripherals.c @@ -296,8 +296,10 @@ static void enable_board_wakeup_source(void) void __init zoom_peripherals_init(void) { - if (wl12xx_set_platform_data(&omap_zoom_wlan_data)) - pr_err("error setting wl12xx data\n"); + int ret = wl12xx_set_platform_data(&omap_zoom_wlan_data); + + if (ret) + pr_err("error setting wl12xx data: %d\n", ret); omap_i2c_init(); platform_device_register(&omap_vwlan_device); diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 0b510ad01a00..283d11eae693 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -405,6 +405,7 @@ static int omap_mcspi_init(struct omap_hwmod *oh, void *unused) break; default: pr_err("Invalid McSPI Revision value\n"); + kfree(pdata); return -EINVAL; } diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 3c446d1a1781..3677b1f58b85 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -103,12 +103,8 @@ static void omap4_hdmi_mux_pads(enum omap_hdmi_flags flags) u32 reg; u16 control_i2c_1; - /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */ - omap_mux_init_signal("hdmi_hpd", - OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("hdmi_cec", OMAP_PIN_INPUT_PULLUP); - /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */ omap_mux_init_signal("hdmi_ddc_scl", OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("hdmi_ddc_sda", diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 130034bf01d5..dfffbbf4c009 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -528,7 +528,13 @@ int gpmc_cs_configure(int cs, int cmd, int wval) case GPMC_CONFIG_DEV_SIZE: regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); + + /* clear 2 target bits */ + regval &= ~GPMC_CONFIG1_DEVICESIZE(3); + + /* set the proper value */ regval |= GPMC_CONFIG1_DEVICESIZE(wval); + gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval); break; diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index bd844af13af5..b40c28895298 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -175,14 +175,15 @@ static void hsmmc2_select_input_clk_src(struct omap_mmc_platform_data *mmc) { u32 reg; - if (mmc->slots[0].internal_clock) { - reg = omap_ctrl_readl(control_devconf1_offset); + reg = omap_ctrl_readl(control_devconf1_offset); + if (mmc->slots[0].internal_clock) reg |= OMAP2_MMCSDIO2ADPCLKISEL; - omap_ctrl_writel(reg, control_devconf1_offset); - } + else + reg &= ~OMAP2_MMCSDIO2ADPCLKISEL; + omap_ctrl_writel(reg, control_devconf1_offset); } -static void hsmmc23_before_set_reg(struct device *dev, int slot, +static void hsmmc2_before_set_reg(struct device *dev, int slot, int power_on, int vdd) { struct omap_mmc_platform_data *mmc = dev->platform_data; @@ -292,8 +293,8 @@ static inline void omap_hsmmc_mux(struct omap_mmc_platform_data *mmc_controller, } } -static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, - struct omap_mmc_platform_data *mmc) +static int omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, + struct omap_mmc_platform_data *mmc) { char *hc_name; @@ -407,14 +408,13 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, c->caps &= ~MMC_CAP_8_BIT_DATA; c->caps |= MMC_CAP_4_BIT_DATA; } - /* FALLTHROUGH */ - case 3: if (mmc->slots[0].features & HSMMC_HAS_PBIAS) { /* off-chip level shifting, or none */ - mmc->slots[0].before_set_reg = hsmmc23_before_set_reg; + mmc->slots[0].before_set_reg = 
hsmmc2_before_set_reg; mmc->slots[0].after_set_reg = NULL; } break; + case 3: case 4: case 5: mmc->slots[0].before_set_reg = NULL; @@ -430,7 +430,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, #define MAX_OMAP_MMC_HWMOD_NAME_LEN 16 -void __init omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr) +void omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr) { struct omap_hwmod *oh; struct platform_device *pdev; @@ -487,7 +487,7 @@ done: kfree(mmc_data); } -void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers) +void omap2_hsmmc_init(struct omap2_hsmmc_info *controllers) { u32 reg; diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 3f174d51f67f..eb50c29fb644 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -388,7 +388,7 @@ static void __init omap_hwmod_init_postsetup(void) omap_pm_if_early_init(); } -#ifdef CONFIG_ARCH_OMAP2 +#ifdef CONFIG_SOC_OMAP2420 void __init omap2420_init_early(void) { omap2_set_globals_242x(); @@ -400,7 +400,9 @@ void __init omap2420_init_early(void) omap_hwmod_init_postsetup(); omap2420_clk_init(); } +#endif +#ifdef CONFIG_SOC_OMAP2430 void __init omap2430_init_early(void) { omap2_set_globals_243x(); diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index e1cc75d1a57a..fb8bc9fa43b1 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -100,8 +100,8 @@ void omap_mux_write_array(struct omap_mux_partition *partition, static char *omap_mux_options; -static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition, - int gpio, int val) +static int _omap_mux_init_gpio(struct omap_mux_partition *partition, + int gpio, int val) { struct omap_mux_entry *e; struct omap_mux *gpio_mux = NULL; @@ -145,7 +145,7 @@ static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition, return 0; } -int __init omap_mux_init_gpio(int gpio, int val) +int omap_mux_init_gpio(int gpio, int val) { struct omap_mux_partition *partition; int ret; @@ -159,9 +159,9 @@ int __init omap_mux_init_gpio(int gpio, int val) return -ENODEV; } -static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition, - const char *muxname, - struct omap_mux **found_mux) +static int _omap_mux_get_by_name(struct omap_mux_partition *partition, + const char *muxname, + struct omap_mux **found_mux) { struct omap_mux *mux = NULL; struct omap_mux_entry *e; @@ -240,7 +240,7 @@ omap_mux_get_by_name(const char *muxname, return -ENODEV; } -int __init omap_mux_init_signal(const char *muxname, int val) +int omap_mux_init_signal(const char *muxname, int val) { struct omap_mux_partition *partition = NULL; struct omap_mux *mux = NULL; @@ -1094,8 +1094,8 @@ static void omap_mux_init_package(struct omap_mux *superset, omap_mux_package_init_balls(package_balls, superset); } -static void omap_mux_init_signals(struct omap_mux_partition *partition, - struct omap_board_mux *board_mux) +static void __init omap_mux_init_signals(struct omap_mux_partition *partition, + struct omap_board_mux *board_mux) { omap_mux_set_cmdline_signals(); omap_mux_write_array(partition, board_mux); @@ -1109,8 +1109,8 @@ static void omap_mux_init_package(struct omap_mux *superset, { } -static void omap_mux_init_signals(struct omap_mux_partition *partition, - struct omap_board_mux *board_mux) +static void __init omap_mux_init_signals(struct omap_mux_partition *partition, + struct omap_board_mux *board_mux) { } diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S 
index b13ef7ef5ef4..503ac777a2ba 100644 --- a/arch/arm/mach-omap2/omap-headsmp.S +++ b/arch/arm/mach-omap2/omap-headsmp.S @@ -18,6 +18,7 @@ #include <linux/linkage.h> #include <linux/init.h> + __CPUINIT /* * OMAP4 specific entry point for secondary CPU to jump from ROM * code. This routine also provides a holding flag into which diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 5192cabb40ed..eba6cd3816f5 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1517,8 +1517,8 @@ static int _enable(struct omap_hwmod *oh) if (oh->_state != _HWMOD_STATE_INITIALIZED && oh->_state != _HWMOD_STATE_IDLE && oh->_state != _HWMOD_STATE_DISABLED) { - WARN(1, "omap_hwmod: %s: enabled state can only be entered " - "from initialized, idle, or disabled state\n", oh->name); + WARN(1, "omap_hwmod: %s: enabled state can only be entered from initialized, idle, or disabled state\n", + oh->name); return -EINVAL; } @@ -1600,8 +1600,8 @@ static int _idle(struct omap_hwmod *oh) pr_debug("omap_hwmod: %s: idling\n", oh->name); if (oh->_state != _HWMOD_STATE_ENABLED) { - WARN(1, "omap_hwmod: %s: idle state can only be entered from " - "enabled state\n", oh->name); + WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n", + oh->name); return -EINVAL; } @@ -1682,8 +1682,8 @@ static int _shutdown(struct omap_hwmod *oh) if (oh->_state != _HWMOD_STATE_IDLE && oh->_state != _HWMOD_STATE_ENABLED) { - WARN(1, "omap_hwmod: %s: disabled state can only be entered " - "from idle, or enabled state\n", oh->name); + WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n", + oh->name); return -EINVAL; } @@ -2240,8 +2240,8 @@ void omap_hwmod_ocp_barrier(struct omap_hwmod *oh) BUG_ON(!oh); if (!oh->class->sysc || !oh->class->sysc->sysc_flags) { - WARN(1, "omap_device: %s: OCP barrier impossible due to " - "device configuration\n", oh->name); + WARN(1, "omap_device: %s: OCP barrier impossible due to device configuration\n", + oh->name); return; } diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c index c11273da5dcc..f08e442af397 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c @@ -56,27 +56,6 @@ struct omap_hwmod_class omap2_dss_hwmod_class = { }; /* - * 'dispc' class - * display controller - */ - -static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = { - .rev_offs = 0x0000, - .sysc_offs = 0x0010, - .syss_offs = 0x0014, - .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE | - SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), - .sysc_fields = &omap_hwmod_sysc_type1, -}; - -struct omap_hwmod_class omap2_dispc_hwmod_class = { - .name = "dispc", - .sysc = &omap2_dispc_sysc, -}; - -/* * 'rfbi' class * remote frame buffer interface */ diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index 177dee20faef..2a6729741b06 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c @@ -28,6 +28,28 @@ struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = { { .name = "dispc", .dma_req = 5 }, { .dma_req = -1 } }; + +/* + * 'dispc' class + * display controller + */ + +static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = { + .rev_offs = 
0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE | + SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +struct omap_hwmod_class omap2_dispc_hwmod_class = { + .name = "dispc", + .sysc = &omap2_dispc_sysc, +}; + /* OMAP2xxx Timer Common */ static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = { .rev_offs = 0x0000, diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 5324e8d93bc0..3c8dd928628e 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1480,6 +1480,28 @@ static struct omap_hwmod omap3xxx_dss_core_hwmod = { .masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters), }; +/* + * 'dispc' class + * display controller + */ + +static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE | + SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | + SYSC_HAS_ENAWAKEUP), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class omap3_dispc_hwmod_class = { + .name = "dispc", + .sysc = &omap3_dispc_sysc, +}; + /* l4_core -> dss_dispc */ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = { .master = &omap3xxx_l4_core_hwmod, @@ -1503,7 +1525,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = { static struct omap_hwmod omap3xxx_dss_dispc_hwmod = { .name = "dss_dispc", - .class = &omap2_dispc_hwmod_class, + .class = &omap3_dispc_hwmod_class, .mpu_irqs = omap2_dispc_irqs, .main_clk = "dss1_alwon_fck", .prcm = { @@ -3523,12 +3545,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { &omap3xxx_uart2_hwmod, &omap3xxx_uart3_hwmod, - /* dss class */ - &omap3xxx_dss_dispc_hwmod, - &omap3xxx_dss_dsi1_hwmod, - &omap3xxx_dss_rfbi_hwmod, - &omap3xxx_dss_venc_hwmod, - /* i2c class */ &omap3xxx_i2c1_hwmod, &omap3xxx_i2c2_hwmod, @@ -3635,6 +3651,15 @@ static __initdata struct omap_hwmod *am35xx_hwmods[] = { NULL }; +static __initdata struct omap_hwmod *omap3xxx_dss_hwmods[] = { + /* dss class */ + &omap3xxx_dss_dispc_hwmod, + &omap3xxx_dss_dsi1_hwmod, + &omap3xxx_dss_rfbi_hwmod, + &omap3xxx_dss_venc_hwmod, + NULL +}; + int __init omap3xxx_hwmod_init(void) { int r; @@ -3708,6 +3733,21 @@ int __init omap3xxx_hwmod_init(void) if (h) r = omap_hwmod_register(h); + if (r < 0) + return r; + + /* + * DSS code presumes that dss_core hwmod is handled first, + * _before_ any other DSS related hwmods so register common + * DSS hwmods last to ensure that dss_core is already registered. + * Otherwise some change things may happen, for ex. if dispc + * is handled before dss_core and DSS is enabled in bootloader + * DIPSC will be reset with outputs enabled which sometimes leads + * to unrecoverable L3 error. + * XXX The long-term fix to this is to ensure modules are set up + * in dependency order in the hwmod core code. 
+ */ + r = omap_hwmod_register(omap3xxx_dss_hwmods); return r; } diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index f9f151081760..ef0524c10a84 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -1031,6 +1031,7 @@ static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = { static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = { { + .name = "mpu", .pa_start = 0x4012e000, .pa_end = 0x4012e07f, .flags = ADDR_TYPE_RT @@ -1049,6 +1050,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = { static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = { { + .name = "dma", .pa_start = 0x4902e000, .pa_end = 0x4902e07f, .flags = ADDR_TYPE_RT diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c index c1c4d86a79a8..9ce765407ad5 100644 --- a/arch/arm/mach-omap2/prm2xxx_3xxx.c +++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c @@ -19,6 +19,7 @@ #include "common.h" #include <plat/cpu.h> #include <plat/prcm.h> +#include <plat/irqs.h> #include "vp.h" diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index 33dd655e6aab..a1d6154dc120 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -19,6 +19,7 @@ #include "common.h" #include <plat/cpu.h> +#include <plat/irqs.h> #include <plat/prcm.h> #include "vp.h" diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 247d89478f24..f590afc1f673 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -107,18 +107,18 @@ static void omap_uart_set_noidle(struct platform_device *pdev) omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO); } -static void omap_uart_set_forceidle(struct platform_device *pdev) +static void omap_uart_set_smartidle(struct platform_device *pdev) { struct omap_device *od = to_omap_device(pdev); - omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_FORCE); + omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART); } #else static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable) {} static void omap_uart_set_noidle(struct platform_device *pdev) {} -static void omap_uart_set_forceidle(struct platform_device *pdev) {} +static void omap_uart_set_smartidle(struct platform_device *pdev) {} #endif /* CONFIG_PM */ #ifdef CONFIG_OMAP_MUX @@ -349,7 +349,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata, omap_up.uartclk = OMAP24XX_BASE_BAUD * 16; omap_up.flags = UPF_BOOT_AUTOCONF; omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count; - omap_up.set_forceidle = omap_uart_set_forceidle; + omap_up.set_forceidle = omap_uart_set_smartidle; omap_up.set_noidle = omap_uart_set_noidle; omap_up.enable_wakeup = omap_uart_enable_wakeup; omap_up.dma_rx_buf_size = info->dma_rx_buf_size; diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index 9dd93453e563..7e755bb0ffc4 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c @@ -897,7 +897,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) ret = sr_late_init(sr_info); if (ret) { pr_warning("%s: Error in SR late init\n", __func__); - return ret; + goto err_iounmap; } } diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 6eeff0e0ae01..5c9acea95761 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -270,7 +270,7 @@ static struct clocksource clocksource_gpt = { static 
u32 notrace dmtimer_read_sched_clock(void) { if (clksrc.reserved) - return __omap_dm_timer_read_counter(clksrc.io_base, 1); + return __omap_dm_timer_read_counter(&clksrc, 1); return 0; } diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c index 031d116fbf10..175b7d86d86a 100644 --- a/arch/arm/mach-omap2/vc.c +++ b/arch/arm/mach-omap2/vc.c @@ -247,7 +247,7 @@ static void __init omap4_vc_init_channel(struct voltagedomain *voltdm) * omap_vc_i2c_init - initialize I2C interface to PMIC * @voltdm: voltage domain containing VC data * - * Use PMIC supplied seetings for I2C high-speed mode and + * Use PMIC supplied settings for I2C high-speed mode and * master code (if set) and program the VC I2C configuration * register. * @@ -265,8 +265,8 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) if (initialized) { if (voltdm->pmic->i2c_high_speed != i2c_high_speed) - pr_warn("%s: I2C config for all channels must match.", - __func__); + pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).", + __func__, voltdm->name, i2c_high_speed); return; } @@ -292,9 +292,7 @@ void __init omap_vc_init_channel(struct voltagedomain *voltdm) u32 val; if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) { - pr_err("%s: PMIC info requried to configure vc for" - "vdd_%s not populated.Hence cannot initialize vc\n", - __func__, voltdm->name); + pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name); return; } diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c index 807391d84a9d..0df88820978d 100644 --- a/arch/arm/mach-omap2/vp.c +++ b/arch/arm/mach-omap2/vp.c @@ -41,6 +41,11 @@ void __init omap_vp_init(struct voltagedomain *voltdm) u32 val, sys_clk_rate, timeout, waittime; u32 vddmin, vddmax, vstepmin, vstepmax; + if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) { + pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name); + return; + } + if (!voltdm->read || !voltdm->write) { pr_err("%s: No read/write API for accessing vdd_%s regs\n", __func__, voltdm->name); diff --git a/arch/arm/mach-prima2/irq.c b/arch/arm/mach-prima2/irq.c index d93ceef4a50a..37c2de9b6f26 100644 --- a/arch/arm/mach-prima2/irq.c +++ b/arch/arm/mach-prima2/irq.c @@ -68,7 +68,7 @@ void __init sirfsoc_of_irq_init(void) if (!sirfsoc_intc_base) panic("unable to map intc cpu registers\n"); - irq_domain_add_simple(np, 0); + irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL); of_node_put(np); diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index 6fcf304d3cdf..a83cf51fc099 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -662,6 +662,7 @@ static struct sh_dmae_pdata usb_dma0_platform_data = { .dmaor_is_32bit = 1, .needs_tend_set = 1, .no_dmars = 1, + .slave_only = 1, }; static struct resource sh7372_usb_dmae0_resources[] = { @@ -723,6 +724,7 @@ static struct sh_dmae_pdata usb_dma1_platform_data = { .dmaor_is_32bit = 1, .needs_tend_set = 1, .no_dmars = 1, + .slave_only = 1, }; static struct resource sh7372_usb_dmae1_resources[] = { diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 02b7b9303f3b..008ce22b9a06 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -98,8 +98,11 @@ static const struct of_device_id sic_of_match[] __initconst = { void __init versatile_init_irq(void) { - vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 0); - irq_domain_generate_simple(vic_of_match, VERSATILE_VIC_BASE, IRQ_VIC_START); + struct device_node *np; + 
+ np = of_find_matching_node_by_address(NULL, vic_of_match, + VERSATILE_VIC_BASE); + __vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 0, np); writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR); diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 07c4bc8ea0a4..7a24d39661f0 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -54,9 +54,15 @@ loop1: and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache +#ifdef CONFIG_PREEMPT + save_and_disable_irqs r9 @ make cssr&csidr read atomic +#endif mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr +#ifdef CONFIG_PREEMPT + restore_irqs_notrace r9 +#endif and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) ldr r4, =0x3ff diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index ba159370fa5f..80632e8d7538 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -225,8 +225,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) continue; if (__phys_to_pfn(area->phys_addr) > pfn || - __pfn_to_phys(pfn) + offset + size-1 > - area->phys_addr + area->size-1) + __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) continue; /* we can drop the lock here as we know *area is static */ read_unlock(&vmlist_lock); diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index 197e96f70405..3dea7231f637 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -8,6 +8,7 @@ config AVR32 select HAVE_KPROBES select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE + select GENERIC_ATOMIC64 select HARDIRQS_SW_RESEND select GENERIC_IRQ_SHOW select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 26e67f0f0051..3c64b2894c13 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -12,6 +12,7 @@ config TMS320C6X select HAVE_GENERIC_HARDIRQS select HAVE_MEMBLOCK select HAVE_SPARSE_IRQ + select IRQ_DOMAIN select OF select OF_EARLY_FLATTREE diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h index a6ae3c9d9c40..f13b78d5e1ca 100644 --- a/arch/c6x/include/asm/irq.h +++ b/arch/c6x/include/asm/irq.h @@ -13,6 +13,7 @@ #ifndef _ASM_C6X_IRQ_H #define _ASM_C6X_IRQ_H +#include <linux/irqdomain.h> #include <linux/threads.h> #include <linux/list.h> #include <linux/radix-tree.h> @@ -41,253 +42,9 @@ /* This number is used when no interrupt has been assigned */ #define NO_IRQ 0 -/* This type is the placeholder for a hardware interrupt number. It has to - * be big enough to enclose whatever representation is used by a given - * platform. - */ -typedef unsigned long irq_hw_number_t; - -/* Interrupt controller "host" data structure. This could be defined as a - * irq domain controller. That is, it handles the mapping between hardware - * and virtual interrupt numbers for a given interrupt domain. The host - * structure is generally created by the PIC code for a given PIC instance - * (though a host can cover more than one PIC if they have a flat number - * model). It's the host callbacks that are responsible for setting the - * irq_chip on a given irq_desc after it's been mapped. - * - * The host code and data structures are fairly agnostic to the fact that - * we use an open firmware device-tree. 
We do have references to struct - * device_node in two places: in irq_find_host() to find the host matching - * a given interrupt controller node, and of course as an argument to its - * counterpart host->ops->match() callback. However, those are treated as - * generic pointers by the core and the fact that it's actually a device-node - * pointer is purely a convention between callers and implementation. This - * code could thus be used on other architectures by replacing those two - * by some sort of arch-specific void * "token" used to identify interrupt - * controllers. - */ -struct irq_host; -struct radix_tree_root; -struct device_node; - -/* Functions below are provided by the host and called whenever a new mapping - * is created or an old mapping is disposed. The host can then proceed to - * whatever internal data structures management is required. It also needs - * to setup the irq_desc when returning from map(). - */ -struct irq_host_ops { - /* Match an interrupt controller device node to a host, returns - * 1 on a match - */ - int (*match)(struct irq_host *h, struct device_node *node); - - /* Create or update a mapping between a virtual irq number and a hw - * irq number. This is called only once for a given mapping. - */ - int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw); - - /* Dispose of such a mapping */ - void (*unmap)(struct irq_host *h, unsigned int virq); - - /* Translate device-tree interrupt specifier from raw format coming - * from the firmware to a irq_hw_number_t (interrupt line number) and - * type (sense) that can be passed to set_irq_type(). In the absence - * of this callback, irq_create_of_mapping() and irq_of_parse_and_map() - * will return the hw number in the first cell and IRQ_TYPE_NONE for - * the type (which amount to keeping whatever default value the - * interrupt controller has for that line) - */ - int (*xlate)(struct irq_host *h, struct device_node *ctrler, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type); -}; - -struct irq_host { - struct list_head link; - - /* type of reverse mapping technique */ - unsigned int revmap_type; -#define IRQ_HOST_MAP_PRIORITY 0 /* core priority irqs, get irqs 1..15 */ -#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */ -#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */ -#define IRQ_HOST_MAP_TREE 3 /* radix tree */ - union { - struct { - unsigned int size; - unsigned int *revmap; - } linear; - struct radix_tree_root tree; - } revmap_data; - struct irq_host_ops *ops; - void *host_data; - irq_hw_number_t inval_irq; - - /* Optional device node pointer */ - struct device_node *of_node; -}; - struct irq_data; extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); extern irq_hw_number_t virq_to_hw(unsigned int virq); -extern bool virq_is_host(unsigned int virq, struct irq_host *host); - -/** - * irq_alloc_host - Allocate a new irq_host data structure - * @of_node: optional device-tree node of the interrupt controller - * @revmap_type: type of reverse mapping to use - * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map - * @ops: map/unmap host callbacks - * @inval_irq: provide a hw number in that host space that is always invalid - * - * Allocates and initialize and irq_host structure. Note that in the case of - * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns - * for all legacy interrupts except 0 (which is always the invalid irq for - * a legacy controller). 
For a IRQ_HOST_MAP_LINEAR, the map is allocated by - * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated - * later during boot automatically (the reverse mapping will use the slow path - * until that happens). - */ -extern struct irq_host *irq_alloc_host(struct device_node *of_node, - unsigned int revmap_type, - unsigned int revmap_arg, - struct irq_host_ops *ops, - irq_hw_number_t inval_irq); - - -/** - * irq_find_host - Locates a host for a given device node - * @node: device-tree node of the interrupt controller - */ -extern struct irq_host *irq_find_host(struct device_node *node); - - -/** - * irq_set_default_host - Set a "default" host - * @host: default host pointer - * - * For convenience, it's possible to set a "default" host that will be used - * whenever NULL is passed to irq_create_mapping(). It makes life easier for - * platforms that want to manipulate a few hard coded interrupt numbers that - * aren't properly represented in the device-tree. - */ -extern void irq_set_default_host(struct irq_host *host); - - -/** - * irq_set_virq_count - Set the maximum number of virt irqs - * @count: number of linux virtual irqs, capped with NR_IRQS - * - * This is mainly for use by platforms like iSeries who want to program - * the virtual irq number in the controller to avoid the reverse mapping - */ -extern void irq_set_virq_count(unsigned int count); - - -/** - * irq_create_mapping - Map a hardware interrupt into linux virq space - * @host: host owning this hardware interrupt or NULL for default host - * @hwirq: hardware irq number in that host space - * - * Only one mapping per hardware interrupt is permitted. Returns a linux - * virq number. - * If the sense/trigger is to be specified, set_irq_type() should be called - * on the number returned from that call. - */ -extern unsigned int irq_create_mapping(struct irq_host *host, - irq_hw_number_t hwirq); - - -/** - * irq_dispose_mapping - Unmap an interrupt - * @virq: linux virq number of the interrupt to unmap - */ -extern void irq_dispose_mapping(unsigned int virq); - -/** - * irq_find_mapping - Find a linux virq from an hw irq number. - * @host: host owning this hardware interrupt - * @hwirq: hardware irq number in that host space - * - * This is a slow path, for use by generic code. It's expected that an - * irq controller implementation directly calls the appropriate low level - * mapping function. - */ -extern unsigned int irq_find_mapping(struct irq_host *host, - irq_hw_number_t hwirq); - -/** - * irq_create_direct_mapping - Allocate a virq for direct mapping - * @host: host to allocate the virq for or NULL for default host - * - * This routine is used for irq controllers which can choose the hardware - * interrupt numbers they generate. In such a case it's simplest to use - * the linux virq as the hardware interrupt number. - */ -extern unsigned int irq_create_direct_mapping(struct irq_host *host); - -/** - * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping. - * @host: host owning this hardware interrupt - * @virq: linux irq number - * @hwirq: hardware irq number in that host space - * - * This is for use by irq controllers that use a radix tree reverse - * mapping for fast lookup. - */ -extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, - irq_hw_number_t hwirq); - -/** - * irq_radix_revmap_lookup - Find a linux virq from a hw irq number. 
- * @host: host owning this hardware interrupt - * @hwirq: hardware irq number in that host space - * - * This is a fast path, for use by irq controller code that uses radix tree - * revmaps - */ -extern unsigned int irq_radix_revmap_lookup(struct irq_host *host, - irq_hw_number_t hwirq); - -/** - * irq_linear_revmap - Find a linux virq from a hw irq number. - * @host: host owning this hardware interrupt - * @hwirq: hardware irq number in that host space - * - * This is a fast path, for use by irq controller code that uses linear - * revmaps. It does fallback to the slow path if the revmap doesn't exist - * yet and will create the revmap entry with appropriate locking - */ - -extern unsigned int irq_linear_revmap(struct irq_host *host, - irq_hw_number_t hwirq); - - - -/** - * irq_alloc_virt - Allocate virtual irq numbers - * @host: host owning these new virtual irqs - * @count: number of consecutive numbers to allocate - * @hint: pass a hint number, the allocator will try to use a 1:1 mapping - * - * This is a low level function that is used internally by irq_create_mapping() - * and that can be used by some irq controllers implementations for things - * like allocating ranges of numbers for MSIs. The revmaps are left untouched. - */ -extern unsigned int irq_alloc_virt(struct irq_host *host, - unsigned int count, - unsigned int hint); - -/** - * irq_free_virt - Free virtual irq numbers - * @virq: virtual irq number of the first interrupt to free - * @count: number of interrupts to free - * - * This function is the opposite of irq_alloc_virt. It will not clear reverse - * maps, this should be done previously by unmap'ing the interrupt. In fact, - * all interrupts covered by the range being freed should have been unmapped - * prior to calling this. - */ -extern void irq_free_virt(unsigned int virq, unsigned int count); extern void __init init_pic_c64xplus(void); diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c index 0929e4b2b244..d77bcfdf0d8e 100644 --- a/arch/c6x/kernel/irq.c +++ b/arch/c6x/kernel/irq.c @@ -73,10 +73,10 @@ asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs) set_irq_regs(old_regs); } -static struct irq_host *core_host; +static struct irq_domain *core_domain; -static int core_host_map(struct irq_host *h, unsigned int virq, - irq_hw_number_t hw) +static int core_domain_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) { if (hw < 4 || hw >= NR_PRIORITY_IRQS) return -EINVAL; @@ -86,8 +86,9 @@ static int core_host_map(struct irq_host *h, unsigned int virq, return 0; } -static struct irq_host_ops core_host_ops = { - .map = core_host_map, +static const struct irq_domain_ops core_domain_ops = { + .map = core_domain_map, + .xlate = irq_domain_xlate_onecell, }; void __init init_IRQ(void) @@ -100,10 +101,11 @@ void __init init_IRQ(void) np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic"); if (np != NULL) { /* create the core host */ - core_host = irq_alloc_host(np, IRQ_HOST_MAP_PRIORITY, 0, - &core_host_ops, 0); - if (core_host) - irq_set_default_host(core_host); + core_domain = irq_domain_add_legacy(np, NR_PRIORITY_IRQS, + 0, 0, &core_domain_ops, + NULL); + if (core_domain) + irq_set_default_host(core_domain); of_node_put(np); } @@ -128,601 +130,15 @@ int arch_show_interrupts(struct seq_file *p, int prec) return 0; } -/* - * IRQ controller and virtual interrupts - */ - -/* The main irq map itself is an array of NR_IRQ entries containing the - * associate host and irq number. An entry with a host of NULL is free. 
- * An entry can be allocated if it's free, the allocator always then sets - * hwirq first to the host's invalid irq number and then fills ops. - */ -struct irq_map_entry { - irq_hw_number_t hwirq; - struct irq_host *host; -}; - -static LIST_HEAD(irq_hosts); -static DEFINE_RAW_SPINLOCK(irq_big_lock); -static DEFINE_MUTEX(revmap_trees_mutex); -static struct irq_map_entry irq_map[NR_IRQS]; -static unsigned int irq_virq_count = NR_IRQS; -static struct irq_host *irq_default_host; - irq_hw_number_t irqd_to_hwirq(struct irq_data *d) { - return irq_map[d->irq].hwirq; + return d->hwirq; } EXPORT_SYMBOL_GPL(irqd_to_hwirq); irq_hw_number_t virq_to_hw(unsigned int virq) { - return irq_map[virq].hwirq; + struct irq_data *irq_data = irq_get_irq_data(virq); + return WARN_ON(!irq_data) ? 0 : irq_data->hwirq; } EXPORT_SYMBOL_GPL(virq_to_hw); - -bool virq_is_host(unsigned int virq, struct irq_host *host) -{ - return irq_map[virq].host == host; -} -EXPORT_SYMBOL_GPL(virq_is_host); - -static int default_irq_host_match(struct irq_host *h, struct device_node *np) -{ - return h->of_node != NULL && h->of_node == np; -} - -struct irq_host *irq_alloc_host(struct device_node *of_node, - unsigned int revmap_type, - unsigned int revmap_arg, - struct irq_host_ops *ops, - irq_hw_number_t inval_irq) -{ - struct irq_host *host; - unsigned int size = sizeof(struct irq_host); - unsigned int i; - unsigned int *rmap; - unsigned long flags; - - /* Allocate structure and revmap table if using linear mapping */ - if (revmap_type == IRQ_HOST_MAP_LINEAR) - size += revmap_arg * sizeof(unsigned int); - host = kzalloc(size, GFP_KERNEL); - if (host == NULL) - return NULL; - - /* Fill structure */ - host->revmap_type = revmap_type; - host->inval_irq = inval_irq; - host->ops = ops; - host->of_node = of_node_get(of_node); - - if (host->ops->match == NULL) - host->ops->match = default_irq_host_match; - - raw_spin_lock_irqsave(&irq_big_lock, flags); - - /* Check for the priority controller. */ - if (revmap_type == IRQ_HOST_MAP_PRIORITY) { - if (irq_map[0].host != NULL) { - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - of_node_put(host->of_node); - kfree(host); - return NULL; - } - irq_map[0].host = host; - } - - list_add(&host->link, &irq_hosts); - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - - /* Additional setups per revmap type */ - switch (revmap_type) { - case IRQ_HOST_MAP_PRIORITY: - /* 0 is always the invalid number for priority */ - host->inval_irq = 0; - /* setup us as the host for all priority interrupts */ - for (i = 1; i < NR_PRIORITY_IRQS; i++) { - irq_map[i].hwirq = i; - smp_wmb(); - irq_map[i].host = host; - smp_wmb(); - - ops->map(host, i, i); - } - break; - case IRQ_HOST_MAP_LINEAR: - rmap = (unsigned int *)(host + 1); - for (i = 0; i < revmap_arg; i++) - rmap[i] = NO_IRQ; - host->revmap_data.linear.size = revmap_arg; - smp_wmb(); - host->revmap_data.linear.revmap = rmap; - break; - case IRQ_HOST_MAP_TREE: - INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL); - break; - default: - break; - } - - pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); - - return host; -} - -struct irq_host *irq_find_host(struct device_node *node) -{ - struct irq_host *h, *found = NULL; - unsigned long flags; - - /* We might want to match the legacy controller last since - * it might potentially be set to match all interrupts in - * the absence of a device node. This isn't a problem so far - * yet though... 
- */ - raw_spin_lock_irqsave(&irq_big_lock, flags); - list_for_each_entry(h, &irq_hosts, link) - if (h->ops->match(h, node)) { - found = h; - break; - } - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - return found; -} -EXPORT_SYMBOL_GPL(irq_find_host); - -void irq_set_default_host(struct irq_host *host) -{ - pr_debug("irq: Default host set to @0x%p\n", host); - - irq_default_host = host; -} - -void irq_set_virq_count(unsigned int count) -{ - pr_debug("irq: Trying to set virq count to %d\n", count); - - BUG_ON(count < NR_PRIORITY_IRQS); - if (count < NR_IRQS) - irq_virq_count = count; -} - -static int irq_setup_virq(struct irq_host *host, unsigned int virq, - irq_hw_number_t hwirq) -{ - int res; - - res = irq_alloc_desc_at(virq, 0); - if (res != virq) { - pr_debug("irq: -> allocating desc failed\n"); - goto error; - } - - /* map it */ - smp_wmb(); - irq_map[virq].hwirq = hwirq; - smp_mb(); - - if (host->ops->map(host, virq, hwirq)) { - pr_debug("irq: -> mapping failed, freeing\n"); - goto errdesc; - } - - irq_clear_status_flags(virq, IRQ_NOREQUEST); - - return 0; - -errdesc: - irq_free_descs(virq, 1); -error: - irq_free_virt(virq, 1); - return -1; -} - -unsigned int irq_create_direct_mapping(struct irq_host *host) -{ - unsigned int virq; - - if (host == NULL) - host = irq_default_host; - - BUG_ON(host == NULL); - WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); - - virq = irq_alloc_virt(host, 1, 0); - if (virq == NO_IRQ) { - pr_debug("irq: create_direct virq allocation failed\n"); - return NO_IRQ; - } - - pr_debug("irq: create_direct obtained virq %d\n", virq); - - if (irq_setup_virq(host, virq, virq)) - return NO_IRQ; - - return virq; -} - -unsigned int irq_create_mapping(struct irq_host *host, - irq_hw_number_t hwirq) -{ - unsigned int virq, hint; - - pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); - - /* Look for default host if nececssary */ - if (host == NULL) - host = irq_default_host; - if (host == NULL) { - printk(KERN_WARNING "irq_create_mapping called for" - " NULL host, hwirq=%lx\n", hwirq); - WARN_ON(1); - return NO_IRQ; - } - pr_debug("irq: -> using host @%p\n", host); - - /* Check if mapping already exists */ - virq = irq_find_mapping(host, hwirq); - if (virq != NO_IRQ) { - pr_debug("irq: -> existing mapping on virq %d\n", virq); - return virq; - } - - /* Allocate a virtual interrupt number */ - hint = hwirq % irq_virq_count; - virq = irq_alloc_virt(host, 1, hint); - if (virq == NO_IRQ) { - pr_debug("irq: -> virq allocation failed\n"); - return NO_IRQ; - } - - if (irq_setup_virq(host, virq, hwirq)) - return NO_IRQ; - - pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n", - hwirq, host->of_node ? 
host->of_node->full_name : "null", virq); - - return virq; -} -EXPORT_SYMBOL_GPL(irq_create_mapping); - -unsigned int irq_create_of_mapping(struct device_node *controller, - const u32 *intspec, unsigned int intsize) -{ - struct irq_host *host; - irq_hw_number_t hwirq; - unsigned int type = IRQ_TYPE_NONE; - unsigned int virq; - - if (controller == NULL) - host = irq_default_host; - else - host = irq_find_host(controller); - if (host == NULL) { - printk(KERN_WARNING "irq: no irq host found for %s !\n", - controller->full_name); - return NO_IRQ; - } - - /* If host has no translation, then we assume interrupt line */ - if (host->ops->xlate == NULL) - hwirq = intspec[0]; - else { - if (host->ops->xlate(host, controller, intspec, intsize, - &hwirq, &type)) - return NO_IRQ; - } - - /* Create mapping */ - virq = irq_create_mapping(host, hwirq); - if (virq == NO_IRQ) - return virq; - - /* Set type if specified and different than the current one */ - if (type != IRQ_TYPE_NONE && - type != (irqd_get_trigger_type(irq_get_irq_data(virq)))) - irq_set_irq_type(virq, type); - return virq; -} -EXPORT_SYMBOL_GPL(irq_create_of_mapping); - -void irq_dispose_mapping(unsigned int virq) -{ - struct irq_host *host; - irq_hw_number_t hwirq; - - if (virq == NO_IRQ) - return; - - /* Never unmap priority interrupts */ - if (virq < NR_PRIORITY_IRQS) - return; - - host = irq_map[virq].host; - if (WARN_ON(host == NULL)) - return; - - irq_set_status_flags(virq, IRQ_NOREQUEST); - - /* remove chip and handler */ - irq_set_chip_and_handler(virq, NULL, NULL); - - /* Make sure it's completed */ - synchronize_irq(virq); - - /* Tell the PIC about it */ - if (host->ops->unmap) - host->ops->unmap(host, virq); - smp_mb(); - - /* Clear reverse map */ - hwirq = irq_map[virq].hwirq; - switch (host->revmap_type) { - case IRQ_HOST_MAP_LINEAR: - if (hwirq < host->revmap_data.linear.size) - host->revmap_data.linear.revmap[hwirq] = NO_IRQ; - break; - case IRQ_HOST_MAP_TREE: - mutex_lock(&revmap_trees_mutex); - radix_tree_delete(&host->revmap_data.tree, hwirq); - mutex_unlock(&revmap_trees_mutex); - break; - } - - /* Destroy map */ - smp_mb(); - irq_map[virq].hwirq = host->inval_irq; - - irq_free_descs(virq, 1); - /* Free it */ - irq_free_virt(virq, 1); -} -EXPORT_SYMBOL_GPL(irq_dispose_mapping); - -unsigned int irq_find_mapping(struct irq_host *host, - irq_hw_number_t hwirq) -{ - unsigned int i; - unsigned int hint = hwirq % irq_virq_count; - - /* Look for default host if nececssary */ - if (host == NULL) - host = irq_default_host; - if (host == NULL) - return NO_IRQ; - - /* Slow path does a linear search of the map */ - i = hint; - do { - if (irq_map[i].host == host && - irq_map[i].hwirq == hwirq) - return i; - i++; - if (i >= irq_virq_count) - i = 4; - } while (i != hint); - return NO_IRQ; -} -EXPORT_SYMBOL_GPL(irq_find_mapping); - -unsigned int irq_radix_revmap_lookup(struct irq_host *host, - irq_hw_number_t hwirq) -{ - struct irq_map_entry *ptr; - unsigned int virq; - - if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE)) - return irq_find_mapping(host, hwirq); - - /* - * The ptr returned references the static global irq_map. - * but freeing an irq can delete nodes along the path to - * do the lookup via call_rcu. - */ - rcu_read_lock(); - ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); - rcu_read_unlock(); - - /* - * If found in radix tree, then fine. - * Else fallback to linear lookup - this should not happen in practice - * as it means that we failed to insert the node in the radix tree. 
- */ - if (ptr) - virq = ptr - irq_map; - else - virq = irq_find_mapping(host, hwirq); - - return virq; -} - -void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, - irq_hw_number_t hwirq) -{ - if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE)) - return; - - if (virq != NO_IRQ) { - mutex_lock(&revmap_trees_mutex); - radix_tree_insert(&host->revmap_data.tree, hwirq, - &irq_map[virq]); - mutex_unlock(&revmap_trees_mutex); - } -} - -unsigned int irq_linear_revmap(struct irq_host *host, - irq_hw_number_t hwirq) -{ - unsigned int *revmap; - - if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR)) - return irq_find_mapping(host, hwirq); - - /* Check revmap bounds */ - if (unlikely(hwirq >= host->revmap_data.linear.size)) - return irq_find_mapping(host, hwirq); - - /* Check if revmap was allocated */ - revmap = host->revmap_data.linear.revmap; - if (unlikely(revmap == NULL)) - return irq_find_mapping(host, hwirq); - - /* Fill up revmap with slow path if no mapping found */ - if (unlikely(revmap[hwirq] == NO_IRQ)) - revmap[hwirq] = irq_find_mapping(host, hwirq); - - return revmap[hwirq]; -} - -unsigned int irq_alloc_virt(struct irq_host *host, - unsigned int count, - unsigned int hint) -{ - unsigned long flags; - unsigned int i, j, found = NO_IRQ; - - if (count == 0 || count > (irq_virq_count - NR_PRIORITY_IRQS)) - return NO_IRQ; - - raw_spin_lock_irqsave(&irq_big_lock, flags); - - /* Use hint for 1 interrupt if any */ - if (count == 1 && hint >= NR_PRIORITY_IRQS && - hint < irq_virq_count && irq_map[hint].host == NULL) { - found = hint; - goto hint_found; - } - - /* Look for count consecutive numbers in the allocatable - * (non-legacy) space - */ - for (i = NR_PRIORITY_IRQS, j = 0; i < irq_virq_count; i++) { - if (irq_map[i].host != NULL) - j = 0; - else - j++; - - if (j == count) { - found = i - count + 1; - break; - } - } - if (found == NO_IRQ) { - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - return NO_IRQ; - } - hint_found: - for (i = found; i < (found + count); i++) { - irq_map[i].hwirq = host->inval_irq; - smp_wmb(); - irq_map[i].host = host; - } - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - return found; -} - -void irq_free_virt(unsigned int virq, unsigned int count) -{ - unsigned long flags; - unsigned int i; - - WARN_ON(virq < NR_PRIORITY_IRQS); - WARN_ON(count == 0 || (virq + count) > irq_virq_count); - - if (virq < NR_PRIORITY_IRQS) { - if (virq + count < NR_PRIORITY_IRQS) - return; - count -= NR_PRIORITY_IRQS - virq; - virq = NR_PRIORITY_IRQS; - } - - if (count > irq_virq_count || virq > irq_virq_count - count) { - if (virq > irq_virq_count) - return; - count = irq_virq_count - virq; - } - - raw_spin_lock_irqsave(&irq_big_lock, flags); - for (i = virq; i < (virq + count); i++) { - struct irq_host *host; - - host = irq_map[i].host; - irq_map[i].hwirq = host->inval_irq; - smp_wmb(); - irq_map[i].host = NULL; - } - raw_spin_unlock_irqrestore(&irq_big_lock, flags); -} - -#ifdef CONFIG_VIRQ_DEBUG -static int virq_debug_show(struct seq_file *m, void *private) -{ - unsigned long flags; - struct irq_desc *desc; - const char *p; - static const char none[] = "none"; - void *data; - int i; - - seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq", - "chip name", "chip data", "host name"); - - for (i = 1; i < nr_irqs; i++) { - desc = irq_to_desc(i); - if (!desc) - continue; - - raw_spin_lock_irqsave(&desc->lock, flags); - - if (desc->action && desc->action->handler) { - struct irq_chip *chip; - - seq_printf(m, "%5d ", i); - seq_printf(m, "0x%05lx ", 
irq_map[i].hwirq); - - chip = irq_desc_get_chip(desc); - if (chip && chip->name) - p = chip->name; - else - p = none; - seq_printf(m, "%-15s ", p); - - data = irq_desc_get_chip_data(desc); - seq_printf(m, "0x%16p ", data); - - if (irq_map[i].host && irq_map[i].host->of_node) - p = irq_map[i].host->of_node->full_name; - else - p = none; - seq_printf(m, "%s\n", p); - } - - raw_spin_unlock_irqrestore(&desc->lock, flags); - } - - return 0; -} - -static int virq_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, virq_debug_show, inode->i_private); -} - -static const struct file_operations virq_debug_fops = { - .open = virq_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int __init irq_debugfs_init(void) -{ - if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, - NULL, &virq_debug_fops) == NULL) - return -ENOMEM; - - return 0; -} -device_initcall(irq_debugfs_init); -#endif /* CONFIG_VIRQ_DEBUG */ diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c index 7c37a947fb1c..c1c4e2ae3f85 100644 --- a/arch/c6x/platforms/megamod-pic.c +++ b/arch/c6x/platforms/megamod-pic.c @@ -48,7 +48,7 @@ struct megamod_regs { }; struct megamod_pic { - struct irq_host *irqhost; + struct irq_domain *irqhost; struct megamod_regs __iomem *regs; raw_spinlock_t lock; @@ -116,7 +116,7 @@ static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc) } } -static int megamod_map(struct irq_host *h, unsigned int virq, +static int megamod_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct megamod_pic *pic = h->host_data; @@ -136,21 +136,9 @@ static int megamod_map(struct irq_host *h, unsigned int virq, return 0; } -static int megamod_xlate(struct irq_host *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type) - -{ - /* megamod intspecs must have 1 cell */ - BUG_ON(intsize != 1); - *out_hwirq = intspec[0]; - *out_type = IRQ_TYPE_NONE; - return 0; -} - -static struct irq_host_ops megamod_host_ops = { +static const struct irq_domain_ops megamod_domain_ops = { .map = megamod_map, - .xlate = megamod_xlate, + .xlate = irq_domain_xlate_onecell, }; static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output) @@ -223,9 +211,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np) return NULL; } - pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, - NR_COMBINERS * 32, &megamod_host_ops, - IRQ_UNMAPPED); + pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32, + &megamod_domain_ops, pic); if (!pic->irqhost) { pr_err("%s: Could not alloc host.\n", np->full_name); goto error_free; diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index c8d6efb99dbf..11060fa87da3 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -14,6 +14,7 @@ config MICROBLAZE select TRACING_SUPPORT select OF select OF_EARLY_FLATTREE + select IRQ_DOMAIN select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW diff --git a/arch/microblaze/include/asm/hardirq.h b/arch/microblaze/include/asm/hardirq.h index cd1ac9aad56c..fb3c05a0cbbf 100644 --- a/arch/microblaze/include/asm/hardirq.h +++ b/arch/microblaze/include/asm/hardirq.h @@ -1,17 +1 @@ -/* - * Copyright (C) 2006 Atmark Techno, Inc. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. 
See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#ifndef _ASM_MICROBLAZE_HARDIRQ_H -#define _ASM_MICROBLAZE_HARDIRQ_H - -/* should be defined in each interrupt controller driver */ -extern unsigned int get_irq(struct pt_regs *regs); - #include <asm-generic/hardirq.h> - -#endif /* _ASM_MICROBLAZE_HARDIRQ_H */ diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h index a175132e4496..bab3b1393ad4 100644 --- a/arch/microblaze/include/asm/irq.h +++ b/arch/microblaze/include/asm/irq.h @@ -9,49 +9,13 @@ #ifndef _ASM_MICROBLAZE_IRQ_H #define _ASM_MICROBLAZE_IRQ_H - -/* - * Linux IRQ# is currently offset by one to map to the hardware - * irq number. So hardware IRQ0 maps to Linux irq 1. - */ -#define NO_IRQ_OFFSET 1 -#define IRQ_OFFSET NO_IRQ_OFFSET -#define NR_IRQS (32 + IRQ_OFFSET) +#define NR_IRQS (32 + 1) #include <asm-generic/irq.h> -/* This type is the placeholder for a hardware interrupt number. It has to - * be big enough to enclose whatever representation is used by a given - * platform. - */ -typedef unsigned long irq_hw_number_t; - -extern unsigned int nr_irq; - struct pt_regs; extern void do_IRQ(struct pt_regs *regs); -/** FIXME - not implement - * irq_dispose_mapping - Unmap an interrupt - * @virq: linux virq number of the interrupt to unmap - */ -static inline void irq_dispose_mapping(unsigned int virq) -{ - return; -} - -struct irq_host; - -/** - * irq_create_mapping - Map a hardware interrupt into linux virq space - * @host: host owning this hardware interrupt or NULL for default host - * @hwirq: hardware irq number in that host space - * - * Only one mapping per hardware interrupt is permitted. Returns a linux - * virq number. - * If the sense/trigger is to be specified, set_irq_type() should be called - * on the number returned from that call. - */ -extern unsigned int irq_create_mapping(struct irq_host *host, - irq_hw_number_t hwirq); +/* should be defined in each interrupt controller driver */ +extern unsigned int get_irq(void); #endif /* _ASM_MICROBLAZE_IRQ_H */ diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c index 44b177e2ab12..ad120672cee5 100644 --- a/arch/microblaze/kernel/intc.c +++ b/arch/microblaze/kernel/intc.c @@ -9,6 +9,7 @@ */ #include <linux/init.h> +#include <linux/irqdomain.h> #include <linux/irq.h> #include <asm/page.h> #include <linux/io.h> @@ -25,8 +26,6 @@ static unsigned int intc_baseaddr; #define INTC_BASE intc_baseaddr #endif -unsigned int nr_irq; - /* No one else should require these constants, so define them locally here. */ #define ISR 0x00 /* Interrupt Status Register */ #define IPR 0x04 /* Interrupt Pending Register */ @@ -84,24 +83,45 @@ static struct irq_chip intc_dev = { .irq_mask_ack = intc_mask_ack, }; -unsigned int get_irq(struct pt_regs *regs) +static struct irq_domain *root_domain; + +unsigned int get_irq(void) { - int irq; + unsigned int hwirq, irq = -1; - /* - * NOTE: This function is the one that needs to be improved in - * order to handle multiple interrupt controllers. It currently - * is hardcoded to check for interrupts only on the first INTC. 
- */ - irq = in_be32(INTC_BASE + IVR) + NO_IRQ_OFFSET; - pr_debug("get_irq: %d\n", irq); + hwirq = in_be32(INTC_BASE + IVR); + if (hwirq != -1U) + irq = irq_find_mapping(root_domain, hwirq); + + pr_debug("get_irq: hwirq=%d, irq=%d\n", hwirq, irq); return irq; } +int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) +{ + u32 intr_mask = (u32)d->host_data; + + if (intr_mask & (1 << hw)) { + irq_set_chip_and_handler_name(irq, &intc_dev, + handle_edge_irq, "edge"); + irq_clear_status_flags(irq, IRQ_LEVEL); + } else { + irq_set_chip_and_handler_name(irq, &intc_dev, + handle_level_irq, "level"); + irq_set_status_flags(irq, IRQ_LEVEL); + } + return 0; +} + +static const struct irq_domain_ops xintc_irq_domain_ops = { + .xlate = irq_domain_xlate_onetwocell, + .map = xintc_map, +}; + void __init init_IRQ(void) { - u32 i, intr_mask; + u32 nr_irq, intr_mask; struct device_node *intc = NULL; #ifdef CONFIG_SELFMOD_INTC unsigned int intc_baseaddr = 0; @@ -146,16 +166,9 @@ void __init init_IRQ(void) /* Turn on the Master Enable. */ out_be32(intc_baseaddr + MER, MER_HIE | MER_ME); - for (i = IRQ_OFFSET; i < (nr_irq + IRQ_OFFSET); ++i) { - if (intr_mask & (0x00000001 << (i - IRQ_OFFSET))) { - irq_set_chip_and_handler_name(i, &intc_dev, - handle_edge_irq, "edge"); - irq_clear_status_flags(i, IRQ_LEVEL); - } else { - irq_set_chip_and_handler_name(i, &intc_dev, - handle_level_irq, "level"); - irq_set_status_flags(i, IRQ_LEVEL); - } - irq_get_irq_data(i)->hwirq = i - IRQ_OFFSET; - } + /* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm + * lazy and Michal can clean it up to something nicer when he tests + * and commits this patch. ~~gcl */ + root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops, + (void *)intr_mask); } diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index bbebcae72c02..ace700afbfdf 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c @@ -31,14 +31,13 @@ void __irq_entry do_IRQ(struct pt_regs *regs) trace_hardirqs_off(); irq_enter(); - irq = get_irq(regs); + irq = get_irq(); next_irq: BUG_ON(!irq); - /* Substract 1 because of get_irq */ - generic_handle_irq(irq + IRQ_OFFSET - NO_IRQ_OFFSET); + generic_handle_irq(irq); - irq = get_irq(regs); - if (irq) { + irq = get_irq(); + if (irq != -1U) { pr_debug("next irq: %d\n", irq); ++concurrent_irq; goto next_irq; @@ -48,18 +47,3 @@ next_irq: set_irq_regs(old_regs); trace_hardirqs_on(); } - -/* MS: There is no any advance mapping mechanism. 
We are using simple 32bit - intc without any cascades or any connection that's why mapping is 1:1 */ -unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq) -{ - return hwirq + IRQ_OFFSET; -} -EXPORT_SYMBOL_GPL(irq_create_mapping); - -unsigned int irq_create_of_mapping(struct device_node *controller, - const u32 *intspec, unsigned int intsize) -{ - return intspec[0] + IRQ_OFFSET; -} -EXPORT_SYMBOL_GPL(irq_create_of_mapping); diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index d4fc1a971779..70e6d0b41ab4 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -26,7 +26,6 @@ #include <linux/cache.h> #include <linux/of_platform.h> #include <linux/dma-mapping.h> -#include <linux/cpu.h> #include <asm/cacheflush.h> #include <asm/entry.h> #include <asm/cpuinfo.h> @@ -52,8 +51,6 @@ void __init setup_arch(char **cmdline_p) unflatten_device_tree(); - /* NOTE I think that this function is not necessary to call */ - /* irq_early_init(); */ setup_cpuinfo(); microblaze_cache_init(); @@ -227,23 +224,5 @@ static int __init setup_bus_notifier(void) return 0; } -arch_initcall(setup_bus_notifier); - -static DEFINE_PER_CPU(struct cpu, cpu_devices); - -static int __init topology_init(void) -{ - int i, ret; - - for_each_present_cpu(i) { - struct cpu *c = &per_cpu(cpu_devices, i); - ret = register_cpu(c, i); - if (ret) - printk(KERN_WARNING "topology_init: register_cpu %d " - "failed (%d)\n", i, ret); - } - - return 0; -} -subsys_initcall(topology_init); +arch_initcall(setup_bus_notifier); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index c4c1312473fb..edbbae17e820 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2327,6 +2327,7 @@ config USE_OF bool "Flattened Device Tree support" select OF select OF_EARLY_FLATTREE + select IRQ_DOMAIN help Include support for flattened device tree machine descriptions. @@ -2356,6 +2357,7 @@ config PCI depends on HW_HAS_PCI select PCI_DOMAINS select GENERIC_PCI_IOMAP + select NO_GENERIC_PCI_IOPORT_MAP help Find out whether you have a PCI motherboard. PCI is the name of a bus system, i.e. the way the CPU talks to the other stuff inside diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index 2354c870a63a..fb698dc09bc9 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -11,15 +11,12 @@ #include <linux/linkage.h> #include <linux/smp.h> +#include <linux/irqdomain.h> #include <asm/mipsmtregs.h> #include <irq.h> -static inline void irq_dispose_mapping(unsigned int virq) -{ -} - #ifdef CONFIG_I8259 static inline int irq_canonicalize(int irq) { diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 6b8b4208481e..558b5395795d 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -60,20 +60,6 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start, } #endif -/* - * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq# - * - * Currently the mapping mechanism is trivial; simple flat hwirq numbers are - * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not - * supported. 
- */ -unsigned int irq_create_of_mapping(struct device_node *controller, - const u32 *intspec, unsigned int intsize) -{ - return intspec[0]; -} -EXPORT_SYMBOL_GPL(irq_create_of_mapping); - void __init early_init_devtree(void *params) { /* Setup flat device-tree pointer */ diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c index 2635b1a96333..fd35daa45314 100644 --- a/arch/mips/lib/iomap-pci.c +++ b/arch/mips/lib/iomap-pci.c @@ -10,8 +10,8 @@ #include <linux/module.h> #include <asm/io.h> -static void __iomem *ioport_map_pci(struct pci_dev *dev, - unsigned long port, unsigned int nr) +void __iomem *__pci_ioport_map(struct pci_dev *dev, + unsigned long port, unsigned int nr) { struct pci_controller *ctrl = dev->bus->sysdata; unsigned long base = ctrl->io_map_base; diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h index e1f3fe26606c..bbb34e5343a2 100644 --- a/arch/openrisc/include/asm/prom.h +++ b/arch/openrisc/include/asm/prom.h @@ -24,6 +24,7 @@ #include <linux/types.h> #include <asm/irq.h> +#include <linux/irqdomain.h> #include <linux/atomic.h> #include <linux/of_irq.h> #include <linux/of_fdt.h> @@ -63,15 +64,6 @@ extern const void *of_get_mac_address(struct device_node *np); struct pci_dev; extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); -/* This routine is here to provide compatibility with how powerpc - * handles IRQ mapping for OF device nodes. We precompute and permanently - * register them in the platform_device objects, whereas powerpc computes them - * on request. - */ -static inline void irq_dispose_mapping(unsigned int virq) -{ -} - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_OPENRISC_PROM_H */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1919634a9b32..303703d716fe 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -135,6 +135,7 @@ config PPC select HAVE_GENERIC_HARDIRQS select HAVE_SPARSE_IRQ select IRQ_PER_CPU + select IRQ_DOMAIN select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL select IRQ_FORCED_THREADING diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h index a9e1f4f796f6..dc7d48e3ea90 100644 --- a/arch/powerpc/include/asm/ehv_pic.h +++ b/arch/powerpc/include/asm/ehv_pic.h @@ -25,7 +25,7 @@ struct ehv_pic { /* The remapper for this EHV_PIC */ - struct irq_host *irqhost; + struct irq_domain *irqhost; /* The "linux" controller struct */ struct irq_chip hc_irq; diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h index 105ade297aad..c3fdfbd5a673 100644 --- a/arch/powerpc/include/asm/i8259.h +++ b/arch/powerpc/include/asm/i8259.h @@ -6,7 +6,7 @@ extern void i8259_init(struct device_node *node, unsigned long intack_addr); extern unsigned int i8259_irq(void); -extern struct irq_host *i8259_get_host(void); +extern struct irq_domain *i8259_get_host(void); #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_I8259_H */ diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index c0e1bc319e35..fe0b09dceb7d 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -9,6 +9,7 @@ * 2 of the License, or (at your option) any later version. 
*/ +#include <linux/irqdomain.h> #include <linux/threads.h> #include <linux/list.h> #include <linux/radix-tree.h> @@ -35,258 +36,12 @@ extern atomic_t ppc_n_lost_interrupts; /* Total number of virq in the platform */ #define NR_IRQS CONFIG_NR_IRQS -/* Number of irqs reserved for the legacy controller */ -#define NUM_ISA_INTERRUPTS 16 - /* Same thing, used by the generic IRQ code */ #define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS -/* This type is the placeholder for a hardware interrupt number. It has to - * be big enough to enclose whatever representation is used by a given - * platform. - */ -typedef unsigned long irq_hw_number_t; - -/* Interrupt controller "host" data structure. This could be defined as a - * irq domain controller. That is, it handles the mapping between hardware - * and virtual interrupt numbers for a given interrupt domain. The host - * structure is generally created by the PIC code for a given PIC instance - * (though a host can cover more than one PIC if they have a flat number - * model). It's the host callbacks that are responsible for setting the - * irq_chip on a given irq_desc after it's been mapped. - * - * The host code and data structures are fairly agnostic to the fact that - * we use an open firmware device-tree. We do have references to struct - * device_node in two places: in irq_find_host() to find the host matching - * a given interrupt controller node, and of course as an argument to its - * counterpart host->ops->match() callback. However, those are treated as - * generic pointers by the core and the fact that it's actually a device-node - * pointer is purely a convention between callers and implementation. This - * code could thus be used on other architectures by replacing those two - * by some sort of arch-specific void * "token" used to identify interrupt - * controllers. - */ -struct irq_host; -struct radix_tree_root; - -/* Functions below are provided by the host and called whenever a new mapping - * is created or an old mapping is disposed. The host can then proceed to - * whatever internal data structures management is required. It also needs - * to setup the irq_desc when returning from map(). - */ -struct irq_host_ops { - /* Match an interrupt controller device node to a host, returns - * 1 on a match - */ - int (*match)(struct irq_host *h, struct device_node *node); - - /* Create or update a mapping between a virtual irq number and a hw - * irq number. This is called only once for a given mapping. - */ - int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw); - - /* Dispose of such a mapping */ - void (*unmap)(struct irq_host *h, unsigned int virq); - - /* Translate device-tree interrupt specifier from raw format coming - * from the firmware to a irq_hw_number_t (interrupt line number) and - * type (sense) that can be passed to set_irq_type(). 
In the absence - * of this callback, irq_create_of_mapping() and irq_of_parse_and_map() - * will return the hw number in the first cell and IRQ_TYPE_NONE for - * the type (which amount to keeping whatever default value the - * interrupt controller has for that line) - */ - int (*xlate)(struct irq_host *h, struct device_node *ctrler, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type); -}; - -struct irq_host { - struct list_head link; - - /* type of reverse mapping technique */ - unsigned int revmap_type; -#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */ -#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */ -#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */ -#define IRQ_HOST_MAP_TREE 3 /* radix tree */ - union { - struct { - unsigned int size; - unsigned int *revmap; - } linear; - struct radix_tree_root tree; - } revmap_data; - struct irq_host_ops *ops; - void *host_data; - irq_hw_number_t inval_irq; - - /* Optional device node pointer */ - struct device_node *of_node; -}; - struct irq_data; extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); extern irq_hw_number_t virq_to_hw(unsigned int virq); -extern bool virq_is_host(unsigned int virq, struct irq_host *host); - -/** - * irq_alloc_host - Allocate a new irq_host data structure - * @of_node: optional device-tree node of the interrupt controller - * @revmap_type: type of reverse mapping to use - * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map - * @ops: map/unmap host callbacks - * @inval_irq: provide a hw number in that host space that is always invalid - * - * Allocates and initialize and irq_host structure. Note that in the case of - * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns - * for all legacy interrupts except 0 (which is always the invalid irq for - * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by - * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated - * later during boot automatically (the reverse mapping will use the slow path - * until that happens). - */ -extern struct irq_host *irq_alloc_host(struct device_node *of_node, - unsigned int revmap_type, - unsigned int revmap_arg, - struct irq_host_ops *ops, - irq_hw_number_t inval_irq); - - -/** - * irq_find_host - Locates a host for a given device node - * @node: device-tree node of the interrupt controller - */ -extern struct irq_host *irq_find_host(struct device_node *node); - - -/** - * irq_set_default_host - Set a "default" host - * @host: default host pointer - * - * For convenience, it's possible to set a "default" host that will be used - * whenever NULL is passed to irq_create_mapping(). It makes life easier for - * platforms that want to manipulate a few hard coded interrupt numbers that - * aren't properly represented in the device-tree. 
- */ -extern void irq_set_default_host(struct irq_host *host); - - -/** - * irq_set_virq_count - Set the maximum number of virt irqs - * @count: number of linux virtual irqs, capped with NR_IRQS - * - * This is mainly for use by platforms like iSeries who want to program - * the virtual irq number in the controller to avoid the reverse mapping - */ -extern void irq_set_virq_count(unsigned int count); - - -/** - * irq_create_mapping - Map a hardware interrupt into linux virq space - * @host: host owning this hardware interrupt or NULL for default host - * @hwirq: hardware irq number in that host space - * - * Only one mapping per hardware interrupt is permitted. Returns a linux - * virq number. - * If the sense/trigger is to be specified, set_irq_type() should be called - * on the number returned from that call. - */ -extern unsigned int irq_create_mapping(struct irq_host *host, - irq_hw_number_t hwirq); - - -/** - * irq_dispose_mapping - Unmap an interrupt - * @virq: linux virq number of the interrupt to unmap - */ -extern void irq_dispose_mapping(unsigned int virq); - -/** - * irq_find_mapping - Find a linux virq from an hw irq number. - * @host: host owning this hardware interrupt - * @hwirq: hardware irq number in that host space - * - * This is a slow path, for use by generic code. It's expected that an - * irq controller implementation directly calls the appropriate low level - * mapping function. - */ -extern unsigned int irq_find_mapping(struct irq_host *host, - irq_hw_number_t hwirq); - -/** - * irq_create_direct_mapping - Allocate a virq for direct mapping - * @host: host to allocate the virq for or NULL for default host - * - * This routine is used for irq controllers which can choose the hardware - * interrupt numbers they generate. In such a case it's simplest to use - * the linux virq as the hardware interrupt number. - */ -extern unsigned int irq_create_direct_mapping(struct irq_host *host); - -/** - * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping. - * @host: host owning this hardware interrupt - * @virq: linux irq number - * @hwirq: hardware irq number in that host space - * - * This is for use by irq controllers that use a radix tree reverse - * mapping for fast lookup. - */ -extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, - irq_hw_number_t hwirq); - -/** - * irq_radix_revmap_lookup - Find a linux virq from a hw irq number. - * @host: host owning this hardware interrupt - * @hwirq: hardware irq number in that host space - * - * This is a fast path, for use by irq controller code that uses radix tree - * revmaps - */ -extern unsigned int irq_radix_revmap_lookup(struct irq_host *host, - irq_hw_number_t hwirq); - -/** - * irq_linear_revmap - Find a linux virq from a hw irq number. - * @host: host owning this hardware interrupt - * @hwirq: hardware irq number in that host space - * - * This is a fast path, for use by irq controller code that uses linear - * revmaps. 
It does fallback to the slow path if the revmap doesn't exist - * yet and will create the revmap entry with appropriate locking - */ - -extern unsigned int irq_linear_revmap(struct irq_host *host, - irq_hw_number_t hwirq); - - - -/** - * irq_alloc_virt - Allocate virtual irq numbers - * @host: host owning these new virtual irqs - * @count: number of consecutive numbers to allocate - * @hint: pass a hint number, the allocator will try to use a 1:1 mapping - * - * This is a low level function that is used internally by irq_create_mapping() - * and that can be used by some irq controllers implementations for things - * like allocating ranges of numbers for MSIs. The revmaps are left untouched. - */ -extern unsigned int irq_alloc_virt(struct irq_host *host, - unsigned int count, - unsigned int hint); - -/** - * irq_free_virt - Free virtual irq numbers - * @virq: virtual irq number of the first interrupt to free - * @count: number of interrupts to free - * - * This function is the opposite of irq_alloc_virt. It will not clear reverse - * maps, this should be done previously by unmap'ing the interrupt. In fact, - * all interrupts covered by the range being freed should have been unmapped - * prior to calling this. - */ -extern void irq_free_virt(unsigned int virq, unsigned int count); /** * irq_early_init - Init irq remapping subsystem diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index 67b4d9837236..a5b7c56237f9 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h @@ -255,7 +255,7 @@ struct mpic struct device_node *node; /* The remapper for this MPIC */ - struct irq_host *irqhost; + struct irq_domain *irqhost; /* The "linux" controller struct */ struct irq_chip hc_irq; diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index c48de98ba94e..4ae9a09c3b89 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h @@ -86,7 +86,7 @@ struct ics { extern unsigned int xics_default_server; extern unsigned int xics_default_distrib_server; extern unsigned int xics_interrupt_server_size; -extern struct irq_host *xics_host; +extern struct irq_domain *xics_host; struct xics_cppr { unsigned char stack[MAX_NUM_PRIORITIES]; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 701d4aceb4f4..e3673ff6b7a0 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -486,409 +486,19 @@ void do_softirq(void) local_irq_restore(flags); } - -/* - * IRQ controller and virtual interrupts - */ - -/* The main irq map itself is an array of NR_IRQ entries containing the - * associate host and irq number. An entry with a host of NULL is free. - * An entry can be allocated if it's free, the allocator always then sets - * hwirq first to the host's invalid irq number and then fills ops. - */ -struct irq_map_entry { - irq_hw_number_t hwirq; - struct irq_host *host; -}; - -static LIST_HEAD(irq_hosts); -static DEFINE_RAW_SPINLOCK(irq_big_lock); -static DEFINE_MUTEX(revmap_trees_mutex); -static struct irq_map_entry irq_map[NR_IRQS]; -static unsigned int irq_virq_count = NR_IRQS; -static struct irq_host *irq_default_host; - irq_hw_number_t irqd_to_hwirq(struct irq_data *d) { - return irq_map[d->irq].hwirq; + return d->hwirq; } EXPORT_SYMBOL_GPL(irqd_to_hwirq); irq_hw_number_t virq_to_hw(unsigned int virq) { - return irq_map[virq].hwirq; + struct irq_data *irq_data = irq_get_irq_data(virq); + return WARN_ON(!irq_data) ? 
0 : irq_data->hwirq; } EXPORT_SYMBOL_GPL(virq_to_hw); -bool virq_is_host(unsigned int virq, struct irq_host *host) -{ - return irq_map[virq].host == host; -} -EXPORT_SYMBOL_GPL(virq_is_host); - -static int default_irq_host_match(struct irq_host *h, struct device_node *np) -{ - return h->of_node != NULL && h->of_node == np; -} - -struct irq_host *irq_alloc_host(struct device_node *of_node, - unsigned int revmap_type, - unsigned int revmap_arg, - struct irq_host_ops *ops, - irq_hw_number_t inval_irq) -{ - struct irq_host *host; - unsigned int size = sizeof(struct irq_host); - unsigned int i; - unsigned int *rmap; - unsigned long flags; - - /* Allocate structure and revmap table if using linear mapping */ - if (revmap_type == IRQ_HOST_MAP_LINEAR) - size += revmap_arg * sizeof(unsigned int); - host = kzalloc(size, GFP_KERNEL); - if (host == NULL) - return NULL; - - /* Fill structure */ - host->revmap_type = revmap_type; - host->inval_irq = inval_irq; - host->ops = ops; - host->of_node = of_node_get(of_node); - - if (host->ops->match == NULL) - host->ops->match = default_irq_host_match; - - raw_spin_lock_irqsave(&irq_big_lock, flags); - - /* If it's a legacy controller, check for duplicates and - * mark it as allocated (we use irq 0 host pointer for that - */ - if (revmap_type == IRQ_HOST_MAP_LEGACY) { - if (irq_map[0].host != NULL) { - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - of_node_put(host->of_node); - kfree(host); - return NULL; - } - irq_map[0].host = host; - } - - list_add(&host->link, &irq_hosts); - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - - /* Additional setups per revmap type */ - switch(revmap_type) { - case IRQ_HOST_MAP_LEGACY: - /* 0 is always the invalid number for legacy */ - host->inval_irq = 0; - /* setup us as the host for all legacy interrupts */ - for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { - irq_map[i].hwirq = i; - smp_wmb(); - irq_map[i].host = host; - smp_wmb(); - - /* Legacy flags are left to default at this point, - * one can then use irq_create_mapping() to - * explicitly change them - */ - ops->map(host, i, i); - - /* Clear norequest flags */ - irq_clear_status_flags(i, IRQ_NOREQUEST); - } - break; - case IRQ_HOST_MAP_LINEAR: - rmap = (unsigned int *)(host + 1); - for (i = 0; i < revmap_arg; i++) - rmap[i] = NO_IRQ; - host->revmap_data.linear.size = revmap_arg; - smp_wmb(); - host->revmap_data.linear.revmap = rmap; - break; - case IRQ_HOST_MAP_TREE: - INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL); - break; - default: - break; - } - - pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); - - return host; -} - -struct irq_host *irq_find_host(struct device_node *node) -{ - struct irq_host *h, *found = NULL; - unsigned long flags; - - /* We might want to match the legacy controller last since - * it might potentially be set to match all interrupts in - * the absence of a device node. This isn't a problem so far - * yet though... 
- */ - raw_spin_lock_irqsave(&irq_big_lock, flags); - list_for_each_entry(h, &irq_hosts, link) - if (h->ops->match(h, node)) { - found = h; - break; - } - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - return found; -} -EXPORT_SYMBOL_GPL(irq_find_host); - -void irq_set_default_host(struct irq_host *host) -{ - pr_debug("irq: Default host set to @0x%p\n", host); - - irq_default_host = host; -} - -void irq_set_virq_count(unsigned int count) -{ - pr_debug("irq: Trying to set virq count to %d\n", count); - - BUG_ON(count < NUM_ISA_INTERRUPTS); - if (count < NR_IRQS) - irq_virq_count = count; -} - -static int irq_setup_virq(struct irq_host *host, unsigned int virq, - irq_hw_number_t hwirq) -{ - int res; - - res = irq_alloc_desc_at(virq, 0); - if (res != virq) { - pr_debug("irq: -> allocating desc failed\n"); - goto error; - } - - /* map it */ - smp_wmb(); - irq_map[virq].hwirq = hwirq; - smp_mb(); - - if (host->ops->map(host, virq, hwirq)) { - pr_debug("irq: -> mapping failed, freeing\n"); - goto errdesc; - } - - irq_clear_status_flags(virq, IRQ_NOREQUEST); - - return 0; - -errdesc: - irq_free_descs(virq, 1); -error: - irq_free_virt(virq, 1); - return -1; -} - -unsigned int irq_create_direct_mapping(struct irq_host *host) -{ - unsigned int virq; - - if (host == NULL) - host = irq_default_host; - - BUG_ON(host == NULL); - WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); - - virq = irq_alloc_virt(host, 1, 0); - if (virq == NO_IRQ) { - pr_debug("irq: create_direct virq allocation failed\n"); - return NO_IRQ; - } - - pr_debug("irq: create_direct obtained virq %d\n", virq); - - if (irq_setup_virq(host, virq, virq)) - return NO_IRQ; - - return virq; -} - -unsigned int irq_create_mapping(struct irq_host *host, - irq_hw_number_t hwirq) -{ - unsigned int virq, hint; - - pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); - - /* Look for default host if nececssary */ - if (host == NULL) - host = irq_default_host; - if (host == NULL) { - printk(KERN_WARNING "irq_create_mapping called for" - " NULL host, hwirq=%lx\n", hwirq); - WARN_ON(1); - return NO_IRQ; - } - pr_debug("irq: -> using host @%p\n", host); - - /* Check if mapping already exists */ - virq = irq_find_mapping(host, hwirq); - if (virq != NO_IRQ) { - pr_debug("irq: -> existing mapping on virq %d\n", virq); - return virq; - } - - /* Get a virtual interrupt number */ - if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { - /* Handle legacy */ - virq = (unsigned int)hwirq; - if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) - return NO_IRQ; - return virq; - } else { - /* Allocate a virtual interrupt number */ - hint = hwirq % irq_virq_count; - virq = irq_alloc_virt(host, 1, hint); - if (virq == NO_IRQ) { - pr_debug("irq: -> virq allocation failed\n"); - return NO_IRQ; - } - } - - if (irq_setup_virq(host, virq, hwirq)) - return NO_IRQ; - - pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n", - hwirq, host->of_node ? 
host->of_node->full_name : "null", virq); - - return virq; -} -EXPORT_SYMBOL_GPL(irq_create_mapping); - -unsigned int irq_create_of_mapping(struct device_node *controller, - const u32 *intspec, unsigned int intsize) -{ - struct irq_host *host; - irq_hw_number_t hwirq; - unsigned int type = IRQ_TYPE_NONE; - unsigned int virq; - - if (controller == NULL) - host = irq_default_host; - else - host = irq_find_host(controller); - if (host == NULL) { - printk(KERN_WARNING "irq: no irq host found for %s !\n", - controller->full_name); - return NO_IRQ; - } - - /* If host has no translation, then we assume interrupt line */ - if (host->ops->xlate == NULL) - hwirq = intspec[0]; - else { - if (host->ops->xlate(host, controller, intspec, intsize, - &hwirq, &type)) - return NO_IRQ; - } - - /* Create mapping */ - virq = irq_create_mapping(host, hwirq); - if (virq == NO_IRQ) - return virq; - - /* Set type if specified and different than the current one */ - if (type != IRQ_TYPE_NONE && - type != (irqd_get_trigger_type(irq_get_irq_data(virq)))) - irq_set_irq_type(virq, type); - return virq; -} -EXPORT_SYMBOL_GPL(irq_create_of_mapping); - -void irq_dispose_mapping(unsigned int virq) -{ - struct irq_host *host; - irq_hw_number_t hwirq; - - if (virq == NO_IRQ) - return; - - host = irq_map[virq].host; - if (WARN_ON(host == NULL)) - return; - - /* Never unmap legacy interrupts */ - if (host->revmap_type == IRQ_HOST_MAP_LEGACY) - return; - - irq_set_status_flags(virq, IRQ_NOREQUEST); - - /* remove chip and handler */ - irq_set_chip_and_handler(virq, NULL, NULL); - - /* Make sure it's completed */ - synchronize_irq(virq); - - /* Tell the PIC about it */ - if (host->ops->unmap) - host->ops->unmap(host, virq); - smp_mb(); - - /* Clear reverse map */ - hwirq = irq_map[virq].hwirq; - switch(host->revmap_type) { - case IRQ_HOST_MAP_LINEAR: - if (hwirq < host->revmap_data.linear.size) - host->revmap_data.linear.revmap[hwirq] = NO_IRQ; - break; - case IRQ_HOST_MAP_TREE: - mutex_lock(&revmap_trees_mutex); - radix_tree_delete(&host->revmap_data.tree, hwirq); - mutex_unlock(&revmap_trees_mutex); - break; - } - - /* Destroy map */ - smp_mb(); - irq_map[virq].hwirq = host->inval_irq; - - irq_free_descs(virq, 1); - /* Free it */ - irq_free_virt(virq, 1); -} -EXPORT_SYMBOL_GPL(irq_dispose_mapping); - -unsigned int irq_find_mapping(struct irq_host *host, - irq_hw_number_t hwirq) -{ - unsigned int i; - unsigned int hint = hwirq % irq_virq_count; - - /* Look for default host if nececssary */ - if (host == NULL) - host = irq_default_host; - if (host == NULL) - return NO_IRQ; - - /* legacy -> bail early */ - if (host->revmap_type == IRQ_HOST_MAP_LEGACY) - return hwirq; - - /* Slow path does a linear search of the map */ - if (hint < NUM_ISA_INTERRUPTS) - hint = NUM_ISA_INTERRUPTS; - i = hint; - do { - if (irq_map[i].host == host && - irq_map[i].hwirq == hwirq) - return i; - i++; - if (i >= irq_virq_count) - i = NUM_ISA_INTERRUPTS; - } while(i != hint); - return NO_IRQ; -} -EXPORT_SYMBOL_GPL(irq_find_mapping); - #ifdef CONFIG_SMP int irq_choose_cpu(const struct cpumask *mask) { @@ -925,232 +535,11 @@ int irq_choose_cpu(const struct cpumask *mask) } #endif -unsigned int irq_radix_revmap_lookup(struct irq_host *host, - irq_hw_number_t hwirq) -{ - struct irq_map_entry *ptr; - unsigned int virq; - - if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE)) - return irq_find_mapping(host, hwirq); - - /* - * The ptr returned references the static global irq_map. 
- * but freeing an irq can delete nodes along the path to - * do the lookup via call_rcu. - */ - rcu_read_lock(); - ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); - rcu_read_unlock(); - - /* - * If found in radix tree, then fine. - * Else fallback to linear lookup - this should not happen in practice - * as it means that we failed to insert the node in the radix tree. - */ - if (ptr) - virq = ptr - irq_map; - else - virq = irq_find_mapping(host, hwirq); - - return virq; -} - -void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, - irq_hw_number_t hwirq) -{ - if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE)) - return; - - if (virq != NO_IRQ) { - mutex_lock(&revmap_trees_mutex); - radix_tree_insert(&host->revmap_data.tree, hwirq, - &irq_map[virq]); - mutex_unlock(&revmap_trees_mutex); - } -} - -unsigned int irq_linear_revmap(struct irq_host *host, - irq_hw_number_t hwirq) -{ - unsigned int *revmap; - - if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR)) - return irq_find_mapping(host, hwirq); - - /* Check revmap bounds */ - if (unlikely(hwirq >= host->revmap_data.linear.size)) - return irq_find_mapping(host, hwirq); - - /* Check if revmap was allocated */ - revmap = host->revmap_data.linear.revmap; - if (unlikely(revmap == NULL)) - return irq_find_mapping(host, hwirq); - - /* Fill up revmap with slow path if no mapping found */ - if (unlikely(revmap[hwirq] == NO_IRQ)) - revmap[hwirq] = irq_find_mapping(host, hwirq); - - return revmap[hwirq]; -} - -unsigned int irq_alloc_virt(struct irq_host *host, - unsigned int count, - unsigned int hint) -{ - unsigned long flags; - unsigned int i, j, found = NO_IRQ; - - if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) - return NO_IRQ; - - raw_spin_lock_irqsave(&irq_big_lock, flags); - - /* Use hint for 1 interrupt if any */ - if (count == 1 && hint >= NUM_ISA_INTERRUPTS && - hint < irq_virq_count && irq_map[hint].host == NULL) { - found = hint; - goto hint_found; - } - - /* Look for count consecutive numbers in the allocatable - * (non-legacy) space - */ - for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) { - if (irq_map[i].host != NULL) - j = 0; - else - j++; - - if (j == count) { - found = i - count + 1; - break; - } - } - if (found == NO_IRQ) { - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - return NO_IRQ; - } - hint_found: - for (i = found; i < (found + count); i++) { - irq_map[i].hwirq = host->inval_irq; - smp_wmb(); - irq_map[i].host = host; - } - raw_spin_unlock_irqrestore(&irq_big_lock, flags); - return found; -} - -void irq_free_virt(unsigned int virq, unsigned int count) -{ - unsigned long flags; - unsigned int i; - - WARN_ON (virq < NUM_ISA_INTERRUPTS); - WARN_ON (count == 0 || (virq + count) > irq_virq_count); - - if (virq < NUM_ISA_INTERRUPTS) { - if (virq + count < NUM_ISA_INTERRUPTS) - return; - count =- NUM_ISA_INTERRUPTS - virq; - virq = NUM_ISA_INTERRUPTS; - } - - if (count > irq_virq_count || virq > irq_virq_count - count) { - if (virq > irq_virq_count) - return; - count = irq_virq_count - virq; - } - - raw_spin_lock_irqsave(&irq_big_lock, flags); - for (i = virq; i < (virq + count); i++) { - struct irq_host *host; - - host = irq_map[i].host; - irq_map[i].hwirq = host->inval_irq; - smp_wmb(); - irq_map[i].host = NULL; - } - raw_spin_unlock_irqrestore(&irq_big_lock, flags); -} - int arch_early_irq_init(void) { return 0; } -#ifdef CONFIG_VIRQ_DEBUG -static int virq_debug_show(struct seq_file *m, void *private) -{ - unsigned long flags; - struct irq_desc *desc; - const 
char *p; - static const char none[] = "none"; - void *data; - int i; - - seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq", - "chip name", "chip data", "host name"); - - for (i = 1; i < nr_irqs; i++) { - desc = irq_to_desc(i); - if (!desc) - continue; - - raw_spin_lock_irqsave(&desc->lock, flags); - - if (desc->action && desc->action->handler) { - struct irq_chip *chip; - - seq_printf(m, "%5d ", i); - seq_printf(m, "0x%05lx ", irq_map[i].hwirq); - - chip = irq_desc_get_chip(desc); - if (chip && chip->name) - p = chip->name; - else - p = none; - seq_printf(m, "%-15s ", p); - - data = irq_desc_get_chip_data(desc); - seq_printf(m, "0x%16p ", data); - - if (irq_map[i].host && irq_map[i].host->of_node) - p = irq_map[i].host->of_node->full_name; - else - p = none; - seq_printf(m, "%s\n", p); - } - - raw_spin_unlock_irqrestore(&desc->lock, flags); - } - - return 0; -} - -static int virq_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, virq_debug_show, inode->i_private); -} - -static const struct file_operations virq_debug_fops = { - .open = virq_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int __init irq_debugfs_init(void) -{ - if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, - NULL, &virq_debug_fops) == NULL) - return -ENOMEM; - - return 0; -} -__initcall(irq_debugfs_init); -#endif /* CONFIG_VIRQ_DEBUG */ - #ifdef CONFIG_PPC64 static int __init setup_noirqdistrib(char *str) { diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index 9f09319352c0..ca3a062ed1b9 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c @@ -21,7 +21,7 @@ #include <asm/prom.h> static struct device_node *cpld_pic_node; -static struct irq_host *cpld_pic_host; +static struct irq_domain *cpld_pic_host; /* * Bits to ignore in the misc_status register @@ -123,13 +123,13 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc) } static int -cpld_pic_host_match(struct irq_host *h, struct device_node *node) +cpld_pic_host_match(struct irq_domain *h, struct device_node *node) { return cpld_pic_node == node; } static int -cpld_pic_host_map(struct irq_host *h, unsigned int virq, +cpld_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_status_flags(virq, IRQ_LEVEL); @@ -137,8 +137,7 @@ cpld_pic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static struct -irq_host_ops cpld_pic_host_ops = { +static const struct irq_domain_ops cpld_pic_host_ops = { .match = cpld_pic_host_match, .map = cpld_pic_host_map, }; @@ -191,8 +190,7 @@ mpc5121_ads_cpld_pic_init(void) cpld_pic_node = of_node_get(np); - cpld_pic_host = - irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 16, &cpld_pic_host_ops, 16); + cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL); if (!cpld_pic_host) { printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n"); goto end; diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 96f85e5e0cd3..17d91b7da315 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -45,7 +45,7 @@ static struct of_device_id mpc5200_gpio_ids[] __initdata = { struct media5200_irq { void __iomem *regs; spinlock_t lock; - struct irq_host *irqhost; + struct irq_domain *irqhost; }; struct media5200_irq media5200_irq; @@ -112,7 +112,7 @@ void media5200_irq_cascade(unsigned int 
virq, struct irq_desc *desc) raw_spin_unlock(&desc->lock); } -static int media5200_irq_map(struct irq_host *h, unsigned int virq, +static int media5200_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); @@ -122,7 +122,7 @@ static int media5200_irq_map(struct irq_host *h, unsigned int virq, return 0; } -static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct, +static int media5200_irq_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -136,7 +136,7 @@ static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops media5200_irq_ops = { +static const struct irq_domain_ops media5200_irq_ops = { .map = media5200_irq_map, .xlate = media5200_irq_xlate, }; @@ -173,15 +173,12 @@ static void __init media5200_init_irq(void) spin_lock_init(&media5200_irq.lock); - media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_HOST_MAP_LINEAR, - MEDIA5200_NUM_IRQS, - &media5200_irq_ops, -1); + media5200_irq.irqhost = irq_domain_add_linear(fpga_np, + MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq); if (!media5200_irq.irqhost) goto out; pr_debug("%s: allocated irqhost\n", __func__); - media5200_irq.irqhost->host_data = &media5200_irq; - irq_set_handler_data(cascade_virq, &media5200_irq); irq_set_chained_handler(cascade_virq, media5200_irq_cascade); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index f94f06e52762..028470b95886 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -81,7 +81,7 @@ MODULE_LICENSE("GPL"); * @regs: virtual address of GPT registers * @lock: spinlock to coordinate between different functions. 
* @gc: gpio_chip instance structure; used when GPIO is enabled - * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported + * @irqhost: Pointer to irq_domain instance; used when IRQ mode is supported * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates * if the timer is actively used as wdt which blocks gpt functions @@ -91,7 +91,7 @@ struct mpc52xx_gpt_priv { struct device *dev; struct mpc52xx_gpt __iomem *regs; spinlock_t lock; - struct irq_host *irqhost; + struct irq_domain *irqhost; u32 ipb_freq; u8 wdt_mode; @@ -204,7 +204,7 @@ void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) } } -static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq, +static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct mpc52xx_gpt_priv *gpt = h->host_data; @@ -216,7 +216,7 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq, return 0; } -static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct, +static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -236,7 +236,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops mpc52xx_gpt_irq_ops = { +static const struct irq_domain_ops mpc52xx_gpt_irq_ops = { .map = mpc52xx_gpt_irq_map, .xlate = mpc52xx_gpt_irq_xlate, }; @@ -252,14 +252,12 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) if (!cascade_virq) return; - gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1, - &mpc52xx_gpt_irq_ops, -1); + gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt); if (!gpt->irqhost) { - dev_err(gpt->dev, "irq_alloc_host() failed\n"); + dev_err(gpt->dev, "irq_domain_add_linear() failed\n"); return; } - gpt->irqhost->host_data = gpt; irq_set_handler_data(cascade_virq, gpt); irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 1a9a49570579..8520b58a5e9a 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -132,7 +132,7 @@ static struct of_device_id mpc52xx_sdma_ids[] __initdata = { static struct mpc52xx_intr __iomem *intr; static struct mpc52xx_sdma __iomem *sdma; -static struct irq_host *mpc52xx_irqhost = NULL; +static struct irq_domain *mpc52xx_irqhost = NULL; static unsigned char mpc52xx_map_senses[4] = { IRQ_TYPE_LEVEL_HIGH, @@ -301,7 +301,7 @@ static int mpc52xx_is_extirq(int l1, int l2) /** * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property */ -static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, +static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -335,7 +335,7 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, /** * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure */ -static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, +static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t irq) { int l1irq; @@ -384,7 +384,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int 
virq, return 0; } -static struct irq_host_ops mpc52xx_irqhost_ops = { +static const struct irq_domain_ops mpc52xx_irqhost_ops = { .xlate = mpc52xx_irqhost_xlate, .map = mpc52xx_irqhost_map, }; @@ -444,9 +444,9 @@ void __init mpc52xx_init_irq(void) * As last step, add an irq host to translate the real * hw irq information provided by the ofw to linux virq */ - mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR, + mpc52xx_irqhost = irq_domain_add_linear(picnode, MPC52xx_IRQ_HIGHTESTHWIRQ, - &mpc52xx_irqhost_ops, -1); + &mpc52xx_irqhost_ops, NULL); if (!mpc52xx_irqhost) panic(__FILE__ ": Cannot allocate the IRQ host\n"); diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 8ccf9ed62fe2..328d221fd1c0 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c @@ -29,7 +29,7 @@ static DEFINE_RAW_SPINLOCK(pci_pic_lock); struct pq2ads_pci_pic { struct device_node *node; - struct irq_host *host; + struct irq_domain *host; struct { u32 stat; @@ -103,7 +103,7 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) } } -static int pci_pic_host_map(struct irq_host *h, unsigned int virq, +static int pci_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_status_flags(virq, IRQ_LEVEL); @@ -112,14 +112,14 @@ static int pci_pic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static struct irq_host_ops pci_pic_host_ops = { +static const struct irq_domain_ops pci_pic_host_ops = { .map = pci_pic_host_map, }; int __init pq2ads_pci_init_irq(void) { struct pq2ads_pci_pic *priv; - struct irq_host *host; + struct irq_domain *host; struct device_node *np; int ret = -ENODEV; int irq; @@ -156,17 +156,13 @@ int __init pq2ads_pci_init_irq(void) out_be32(&priv->regs->mask, ~0); mb(); - host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, NUM_IRQS, - &pci_pic_host_ops, NUM_IRQS); + host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv); if (!host) { ret = -ENOMEM; goto out_unmap_regs; } - host->host_data = priv; - priv->host = host; - host->host_data = priv; irq_set_handler_data(irq, priv); irq_set_chained_handler(irq, pq2ads_pci_irq_demux); diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index 12cb9bb2cc68..3bbbf7489487 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c @@ -51,7 +51,7 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = { static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); static void __iomem *socrates_fpga_pic_iobase; -static struct irq_host *socrates_fpga_pic_irq_host; +static struct irq_domain *socrates_fpga_pic_irq_host; static unsigned int socrates_fpga_irqs[3]; static inline uint32_t socrates_fpga_pic_read(int reg) @@ -227,7 +227,7 @@ static struct irq_chip socrates_fpga_pic_chip = { .irq_set_type = socrates_fpga_pic_set_type, }; -static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, +static int socrates_fpga_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hwirq) { /* All interrupts are LEVEL sensitive */ @@ -238,7 +238,7 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int socrates_fpga_pic_host_xlate(struct irq_host *h, +static int socrates_fpga_pic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t 
*out_hwirq, unsigned int *out_flags) { @@ -269,7 +269,7 @@ static int socrates_fpga_pic_host_xlate(struct irq_host *h, return 0; } -static struct irq_host_ops socrates_fpga_pic_host_ops = { +static const struct irq_domain_ops socrates_fpga_pic_host_ops = { .map = socrates_fpga_pic_host_map, .xlate = socrates_fpga_pic_host_xlate, }; @@ -279,10 +279,9 @@ void socrates_fpga_pic_init(struct device_node *pic) unsigned long flags; int i; - /* Setup an irq_host structure */ - socrates_fpga_pic_irq_host = irq_alloc_host(pic, IRQ_HOST_MAP_LINEAR, - SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, - SOCRATES_FPGA_NUM_IRQS); + /* Setup an irq_domain structure */ + socrates_fpga_pic_irq_host = irq_domain_add_linear(pic, + SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL); if (socrates_fpga_pic_irq_host == NULL) { pr_err("FPGA PIC: Unable to allocate host\n"); return; diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c index 94594e58594c..af3fd697de82 100644 --- a/arch/powerpc/platforms/86xx/gef_pic.c +++ b/arch/powerpc/platforms/86xx/gef_pic.c @@ -50,7 +50,7 @@ static DEFINE_RAW_SPINLOCK(gef_pic_lock); static void __iomem *gef_pic_irq_reg_base; -static struct irq_host *gef_pic_irq_host; +static struct irq_domain *gef_pic_irq_host; static int gef_pic_cascade_irq; /* @@ -153,7 +153,7 @@ static struct irq_chip gef_pic_chip = { /* When an interrupt is being configured, this call allows some flexibilty * in deciding which irq_chip structure is used */ -static int gef_pic_host_map(struct irq_host *h, unsigned int virq, +static int gef_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hwirq) { /* All interrupts are LEVEL sensitive */ @@ -163,7 +163,7 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct, +static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { @@ -177,7 +177,7 @@ static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops gef_pic_host_ops = { +static const struct irq_domain_ops gef_pic_host_ops = { .map = gef_pic_host_map, .xlate = gef_pic_host_xlate, }; @@ -211,10 +211,9 @@ void __init gef_pic_init(struct device_node *np) return; } - /* Setup an irq_host structure */ - gef_pic_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, - GEF_PIC_NUM_IRQS, - &gef_pic_host_ops, NO_IRQ); + /* Setup an irq_domain structure */ + gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS, + &gef_pic_host_ops, NULL); if (gef_pic_irq_host == NULL) return; diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 40a6e34793b4..db360fc4cf0e 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -67,7 +67,7 @@ struct axon_msic { - struct irq_host *irq_host; + struct irq_domain *irq_domain; __le32 *fifo_virt; dma_addr_t fifo_phys; dcr_host_t dcr_host; @@ -152,7 +152,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) static struct axon_msic *find_msi_translator(struct pci_dev *dev) { - struct irq_host *irq_host; + struct irq_domain *irq_domain; struct device_node *dn, *tmp; const phandle *ph; struct axon_msic *msic = NULL; @@ -184,14 +184,14 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev) goto out_error; } - irq_host = irq_find_host(dn); - if 
(!irq_host) { - dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n", + irq_domain = irq_find_host(dn); + if (!irq_domain) { + dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n", dn->full_name); goto out_error; } - msic = irq_host->host_data; + msic = irq_domain->host_data; out_error: of_node_put(dn); @@ -280,7 +280,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) BUILD_BUG_ON(NR_IRQS > 65536); list_for_each_entry(entry, &dev->msi_list, list) { - virq = irq_create_direct_mapping(msic->irq_host); + virq = irq_create_direct_mapping(msic->irq_domain); if (virq == NO_IRQ) { dev_warn(&dev->dev, "axon_msi: virq allocation failed!\n"); @@ -318,7 +318,7 @@ static struct irq_chip msic_irq_chip = { .name = "AXON-MSI", }; -static int msic_host_map(struct irq_host *h, unsigned int virq, +static int msic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, h->host_data); @@ -327,7 +327,7 @@ static int msic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static struct irq_host_ops msic_host_ops = { +static const struct irq_domain_ops msic_host_ops = { .map = msic_host_map, }; @@ -337,7 +337,7 @@ static void axon_msi_shutdown(struct platform_device *device) u32 tmp; pr_devel("axon_msi: disabling %s\n", - msic->irq_host->of_node->full_name); + msic->irq_domain->of_node->full_name); tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; msic_dcr_write(msic, MSIC_CTRL_REG, tmp); @@ -392,16 +392,13 @@ static int axon_msi_probe(struct platform_device *device) } memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); - msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP, - NR_IRQS, &msic_host_ops, 0); - if (!msic->irq_host) { - printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n", + msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic); + if (!msic->irq_domain) { + printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", dn->full_name); goto out_free_fifo; } - msic->irq_host->host_data = msic; - irq_set_handler_data(virq, msic); irq_set_chained_handler(virq, axon_msi_cascade); pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index 55015e1f6939..e5c3a2c6090d 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c @@ -34,7 +34,7 @@ static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock); static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; -static struct irq_host *beatic_host; +static struct irq_domain *beatic_host; /* * In this implementation, "virq" == "IRQ plug number", @@ -122,7 +122,7 @@ static struct irq_chip beatic_pic = { * * Note that the number (virq) is already assigned at upper layer. */ -static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) +static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq) { beat_destruct_irq_plug(virq); } @@ -133,7 +133,7 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) * * Note that the number (virq) is already assigned at upper layer. 
*/ -static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, +static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { int64_t err; @@ -154,7 +154,7 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, * Called from irq_create_of_mapping() only. * Note: We have only 1 entry to translate. */ -static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, +static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -166,13 +166,13 @@ static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) +static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np) { /* Match all */ return 1; } -static struct irq_host_ops beatic_pic_host_ops = { +static const struct irq_domain_ops beatic_pic_host_ops = { .map = beatic_pic_host_map, .unmap = beatic_pic_host_unmap, .xlate = beatic_pic_host_xlate, @@ -239,9 +239,7 @@ void __init beatic_init_IRQ(void) ppc_md.get_irq = beatic_get_irq; /* Allocate an irq host */ - beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, - &beatic_pic_host_ops, - 0); + beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL); BUG_ON(beatic_host == NULL); irq_set_default_host(beatic_host); } diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 96a433dd2d64..2d42f3bb66d6 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -56,7 +56,7 @@ struct iic { static DEFINE_PER_CPU(struct iic, cpu_iic); #define IIC_NODE_COUNT 2 -static struct irq_host *iic_host; +static struct irq_domain *iic_host; /* Convert between "pending" bits and hw irq number */ static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) @@ -186,7 +186,7 @@ void iic_message_pass(int cpu, int msg) out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); } -struct irq_host *iic_get_irq_host(int node) +struct irq_domain *iic_get_irq_host(int node) { return iic_host; } @@ -222,13 +222,13 @@ void iic_request_IPIs(void) #endif /* CONFIG_SMP */ -static int iic_host_match(struct irq_host *h, struct device_node *node) +static int iic_host_match(struct irq_domain *h, struct device_node *node) { return of_device_is_compatible(node, "IBM,CBEA-Internal-Interrupt-Controller"); } -static int iic_host_map(struct irq_host *h, unsigned int virq, +static int iic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { switch (hw & IIC_IRQ_TYPE_MASK) { @@ -245,7 +245,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int iic_host_xlate(struct irq_host *h, struct device_node *ct, +static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -285,7 +285,7 @@ static int iic_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops iic_host_ops = { +static const struct irq_domain_ops iic_host_ops = { .match = iic_host_match, .map = iic_host_map, .xlate = iic_host_xlate, @@ -378,8 +378,8 @@ static int __init setup_iic(void) void __init iic_init_IRQ(void) { /* Setup an irq host data structure */ - iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT, - &iic_host_ops, 
IIC_IRQ_INVALID); + iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops, + NULL); BUG_ON(iic_host == NULL); irq_set_default_host(iic_host); diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 442c28c00f88..d8b7cc8a66ca 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -62,7 +62,7 @@ enum { #define SPIDER_IRQ_INVALID 63 struct spider_pic { - struct irq_host *host; + struct irq_domain *host; void __iomem *regs; unsigned int node_id; }; @@ -168,7 +168,7 @@ static struct irq_chip spider_pic = { .irq_set_type = spider_set_irq_type, }; -static int spider_host_map(struct irq_host *h, unsigned int virq, +static int spider_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, h->host_data); @@ -180,7 +180,7 @@ static int spider_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int spider_host_xlate(struct irq_host *h, struct device_node *ct, +static int spider_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -194,7 +194,7 @@ static int spider_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops spider_host_ops = { +static const struct irq_domain_ops spider_host_ops = { .map = spider_host_map, .xlate = spider_host_xlate, }; @@ -299,12 +299,10 @@ static void __init spider_init_one(struct device_node *of_node, int chip, panic("spider_pic: can't map registers !"); /* Allocate a host */ - pic->host = irq_alloc_host(of_node, IRQ_HOST_MAP_LINEAR, - SPIDER_SRC_COUNT, &spider_host_ops, - SPIDER_IRQ_INVALID); + pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT, + &spider_host_ops, pic); if (pic->host == NULL) panic("spider_pic: can't allocate irq host !"); - pic->host->host_data = pic; /* Go through all sources and disable them */ for (i = 0; i < SPIDER_SRC_COUNT; i++) { diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index f61a2dd96b99..53d6eee01963 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ -96,9 +96,9 @@ static struct irq_chip flipper_pic = { * */ -static struct irq_host *flipper_irq_host; +static struct irq_domain *flipper_irq_host; -static int flipper_pic_map(struct irq_host *h, unsigned int virq, +static int flipper_pic_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hwirq) { irq_set_chip_data(virq, h->host_data); @@ -107,13 +107,13 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq, return 0; } -static int flipper_pic_match(struct irq_host *h, struct device_node *np) +static int flipper_pic_match(struct irq_domain *h, struct device_node *np) { return 1; } -static struct irq_host_ops flipper_irq_host_ops = { +static const struct irq_domain_ops flipper_irq_domain_ops = { .map = flipper_pic_map, .match = flipper_pic_match, }; @@ -130,10 +130,10 @@ static void __flipper_quiesce(void __iomem *io_base) out_be32(io_base + FLIPPER_ICR, 0xffffffff); } -struct irq_host * __init flipper_pic_init(struct device_node *np) +struct irq_domain * __init flipper_pic_init(struct device_node *np) { struct device_node *pi; - struct irq_host *irq_host = NULL; + struct irq_domain *irq_domain = NULL; struct resource res; void __iomem *io_base; int retval; @@ -159,17 +159,15 @@ struct irq_host * __init 
flipper_pic_init(struct device_node *np) __flipper_quiesce(io_base); - irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS, - &flipper_irq_host_ops, -1); - if (!irq_host) { - pr_err("failed to allocate irq_host\n"); + irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS, + &flipper_irq_domain_ops, io_base); + if (!irq_domain) { + pr_err("failed to allocate irq_domain\n"); return NULL; } - irq_host->host_data = io_base; - out: - return irq_host; + return irq_domain; } unsigned int flipper_pic_get_irq(void) diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index e4919170c6bc..3006b5117ec6 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -89,9 +89,9 @@ static struct irq_chip hlwd_pic = { * */ -static struct irq_host *hlwd_irq_host; +static struct irq_domain *hlwd_irq_host; -static int hlwd_pic_map(struct irq_host *h, unsigned int virq, +static int hlwd_pic_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hwirq) { irq_set_chip_data(virq, h->host_data); @@ -100,11 +100,11 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq, return 0; } -static struct irq_host_ops hlwd_irq_host_ops = { +static const struct irq_domain_ops hlwd_irq_domain_ops = { .map = hlwd_pic_map, }; -static unsigned int __hlwd_pic_get_irq(struct irq_host *h) +static unsigned int __hlwd_pic_get_irq(struct irq_domain *h) { void __iomem *io_base = h->host_data; int irq; @@ -123,14 +123,14 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq, struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); - struct irq_host *irq_host = irq_get_handler_data(cascade_virq); + struct irq_domain *irq_domain = irq_get_handler_data(cascade_virq); unsigned int virq; raw_spin_lock(&desc->lock); chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */ raw_spin_unlock(&desc->lock); - virq = __hlwd_pic_get_irq(irq_host); + virq = __hlwd_pic_get_irq(irq_domain); if (virq != NO_IRQ) generic_handle_irq(virq); else @@ -155,9 +155,9 @@ static void __hlwd_quiesce(void __iomem *io_base) out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff); } -struct irq_host *hlwd_pic_init(struct device_node *np) +struct irq_domain *hlwd_pic_init(struct device_node *np) { - struct irq_host *irq_host; + struct irq_domain *irq_domain; struct resource res; void __iomem *io_base; int retval; @@ -177,15 +177,14 @@ struct irq_host *hlwd_pic_init(struct device_node *np) __hlwd_quiesce(io_base); - irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, HLWD_NR_IRQS, - &hlwd_irq_host_ops, -1); - if (!irq_host) { - pr_err("failed to allocate irq_host\n"); + irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS, + &hlwd_irq_domain_ops, io_base); + if (!irq_domain) { + pr_err("failed to allocate irq_domain\n"); return NULL; } - irq_host->host_data = io_base; - return irq_host; + return irq_domain; } unsigned int hlwd_pic_get_irq(void) @@ -200,7 +199,7 @@ unsigned int hlwd_pic_get_irq(void) void hlwd_pic_probe(void) { - struct irq_host *host; + struct irq_domain *host; struct device_node *np; const u32 *interrupts; int cascade_virq; diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index b2103453eb01..05ce5164cafc 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c @@ -342,7 +342,7 @@ unsigned int iSeries_get_irq(void) #ifdef CONFIG_PCI -static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, +static int 
iseries_irq_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); @@ -350,13 +350,13 @@ static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int iseries_irq_host_match(struct irq_host *h, struct device_node *np) +static int iseries_irq_host_match(struct irq_domain *h, struct device_node *np) { /* Match all */ return 1; } -static struct irq_host_ops iseries_irq_host_ops = { +static const struct irq_domain_ops iseries_irq_domain_ops = { .map = iseries_irq_host_map, .match = iseries_irq_host_match, }; @@ -368,7 +368,7 @@ static struct irq_host_ops iseries_irq_host_ops = { void __init iSeries_init_IRQ(void) { /* Register PCI event handler and open an event path */ - struct irq_host *host; + struct irq_domain *host; int ret; /* @@ -380,8 +380,7 @@ void __init iSeries_init_IRQ(void) /* Create irq host. No need for a revmap since HV will give us * back our virtual irq number */ - host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, - &iseries_irq_host_ops, 0); + host = irq_domain_add_nomap(NULL, &iseries_irq_domain_ops, NULL); BUG_ON(host == NULL); irq_set_default_host(host); diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 7761aabfc293..92afc382a49e 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -61,7 +61,7 @@ static DEFINE_RAW_SPINLOCK(pmac_pic_lock); static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; static int pmac_irq_cascade = -1; -static struct irq_host *pmac_pic_host; +static struct irq_domain *pmac_pic_host; static void __pmac_retrigger(unsigned int irq_nr) { @@ -268,13 +268,13 @@ static struct irqaction gatwick_cascade_action = { .name = "cascade", }; -static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) +static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node) { /* We match all, we don't always have a node anyway */ return 1; } -static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, +static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { if (hw >= max_irqs) @@ -288,21 +288,10 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, - unsigned int *out_flags) - -{ - *out_flags = IRQ_TYPE_NONE; - *out_hwirq = *intspec; - return 0; -} - -static struct irq_host_ops pmac_pic_host_ops = { +static const struct irq_domain_ops pmac_pic_host_ops = { .match = pmac_pic_host_match, .map = pmac_pic_host_map, - .xlate = pmac_pic_host_xlate, + .xlate = irq_domain_xlate_onecell, }; static void __init pmac_pic_probe_oldstyle(void) @@ -352,9 +341,8 @@ static void __init pmac_pic_probe_oldstyle(void) /* * Allocate an irq host */ - pmac_pic_host = irq_alloc_host(master, IRQ_HOST_MAP_LINEAR, max_irqs, - &pmac_pic_host_ops, - max_irqs); + pmac_pic_host = irq_domain_add_linear(master, max_irqs, + &pmac_pic_host_ops, NULL); BUG_ON(pmac_pic_host == NULL); irq_set_default_host(pmac_pic_host); diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 44d769258ebf..a81e5a88fbdf 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -125,7 +125,7 @@ static volatile u32 __iomem *psurge_start; 
static int psurge_type = PSURGE_NONE; /* irq for secondary cpus to report */ -static struct irq_host *psurge_host; +static struct irq_domain *psurge_host; int psurge_secondary_virq; /* @@ -176,7 +176,7 @@ static void smp_psurge_cause_ipi(int cpu, unsigned long data) psurge_set_ipi(cpu); } -static int psurge_host_map(struct irq_host *h, unsigned int virq, +static int psurge_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq); @@ -184,7 +184,7 @@ static int psurge_host_map(struct irq_host *h, unsigned int virq, return 0; } -struct irq_host_ops psurge_host_ops = { +static const struct irq_domain_ops psurge_host_ops = { .map = psurge_host_map, }; @@ -192,8 +192,7 @@ static int psurge_secondary_ipi_init(void) { int rc = -ENOMEM; - psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, - &psurge_host_ops, 0); + psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL); if (psurge_host) psurge_secondary_virq = irq_create_direct_mapping(psurge_host); diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 617efa12a3a5..2a4ff86cc21f 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c @@ -667,7 +667,7 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd, static void dump_bmp(struct ps3_private* pd) {}; #endif /* defined(DEBUG) */ -static int ps3_host_map(struct irq_host *h, unsigned int virq, +static int ps3_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hwirq) { DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, @@ -678,13 +678,13 @@ static int ps3_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int ps3_host_match(struct irq_host *h, struct device_node *np) +static int ps3_host_match(struct irq_domain *h, struct device_node *np) { /* Match all */ return 1; } -static struct irq_host_ops ps3_host_ops = { +static const struct irq_domain_ops ps3_host_ops = { .map = ps3_host_map, .match = ps3_host_match, }; @@ -751,10 +751,9 @@ void __init ps3_init_IRQ(void) { int result; unsigned cpu; - struct irq_host *host; + struct irq_domain *host; - host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &ps3_host_ops, - PS3_INVALID_OUTLET); + host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL); irq_set_default_host(host); irq_set_virq_count(PS3_PLUG_MAX + 1); diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c index 19f353dfcd03..cb565bf93650 100644 --- a/arch/powerpc/platforms/wsp/opb_pic.c +++ b/arch/powerpc/platforms/wsp/opb_pic.c @@ -30,7 +30,7 @@ static int opb_index = 0; struct opb_pic { - struct irq_host *host; + struct irq_domain *host; void *regs; int index; spinlock_t lock; @@ -179,7 +179,7 @@ static struct irq_chip opb_irq_chip = { .irq_set_type = opb_set_irq_type }; -static int opb_host_map(struct irq_host *host, unsigned int virq, +static int opb_host_map(struct irq_domain *host, unsigned int virq, irq_hw_number_t hwirq) { struct opb_pic *opb; @@ -196,20 +196,9 @@ static int opb_host_map(struct irq_host *host, unsigned int virq, return 0; } -static int opb_host_xlate(struct irq_host *host, struct device_node *dn, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type) -{ - /* Interrupt size must == 2 */ - BUG_ON(intsize != 2); - *out_hwirq = intspec[0]; - *out_type = intspec[1]; - return 0; -} - -static struct irq_host_ops opb_host_ops = { +static const struct irq_domain_ops 
opb_host_ops = { .map = opb_host_map, - .xlate = opb_host_xlate, + .xlate = irq_domain_xlate_twocell, }; irqreturn_t opb_irq_handler(int irq, void *private) @@ -263,13 +252,11 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn) goto free_opb; } - /* Allocate an irq host so that Linux knows that despite only + /* Allocate an irq domain so that Linux knows that despite only * having one interrupt to issue, we're the controller for multiple * hardware IRQs, so later we can lookup their virtual IRQs. */ - opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR, - OPB_NR_IRQS, &opb_host_ops, -1); - + opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb); if (!opb->host) { printk(KERN_ERR "opb: Failed to allocate IRQ host!\n"); goto free_regs; @@ -277,7 +264,6 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn) opb->index = opb_index++; spin_lock_init(&opb->lock); - opb->host->host_data = opb; /* Disable all interrupts by default */ opb_out(opb, OPB_MLSASIER, 0); diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index 5d7d59a43c4c..d4fa03f2b6ac 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c @@ -54,7 +54,7 @@ cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */ immap_t __iomem *mpc8xx_immr; static cpic8xx_t __iomem *cpic_reg; -static struct irq_host *cpm_pic_host; +static struct irq_domain *cpm_pic_host; static void cpm_mask_irq(struct irq_data *d) { @@ -98,7 +98,7 @@ int cpm_get_irq(void) return irq_linear_revmap(cpm_pic_host, cpm_vec); } -static int cpm_pic_host_map(struct irq_host *h, unsigned int virq, +static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); @@ -123,7 +123,7 @@ static struct irqaction cpm_error_irqaction = { .name = "error", }; -static struct irq_host_ops cpm_pic_host_ops = { +static const struct irq_domain_ops cpm_pic_host_ops = { .map = cpm_pic_host_map, }; @@ -164,8 +164,7 @@ unsigned int cpm_pic_init(void) out_be32(&cpic_reg->cpic_cimr, 0); - cpm_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, - 64, &cpm_pic_host_ops, 64); + cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL); if (cpm_pic_host == NULL) { printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); sirq = NO_IRQ; diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index bcab50e2a9eb..d3be961e2ae7 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c @@ -50,7 +50,7 @@ static intctl_cpm2_t __iomem *cpm2_intctl; -static struct irq_host *cpm2_pic_host; +static struct irq_domain *cpm2_pic_host; #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; @@ -214,7 +214,7 @@ unsigned int cpm2_get_irq(void) return irq_linear_revmap(cpm2_pic_host, irq); } -static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq, +static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); @@ -224,21 +224,9 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_flags) -{ - *out_hwirq = intspec[0]; - if (intsize > 1) - *out_flags = intspec[1]; - else - *out_flags = IRQ_TYPE_NONE; - return 0; -} - -static struct irq_host_ops cpm2_pic_host_ops = { +static const struct 
irq_domain_ops cpm2_pic_host_ops = { .map = cpm2_pic_host_map, - .xlate = cpm2_pic_host_xlate, + .xlate = irq_domain_xlate_onetwocell, }; void cpm2_pic_init(struct device_node *node) @@ -275,8 +263,7 @@ void cpm2_pic_init(struct device_node *node) out_be32(&cpm2_intctl->ic_scprrl, 0x05309770); /* create a legacy host */ - cpm2_pic_host = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, - 64, &cpm2_pic_host_ops, 64); + cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL); if (cpm2_pic_host == NULL) { printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); return; diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c index b6731e4a6646..6e0e1005227f 100644 --- a/arch/powerpc/sysdev/ehv_pic.c +++ b/arch/powerpc/sysdev/ehv_pic.c @@ -182,13 +182,13 @@ unsigned int ehv_pic_get_irq(void) return irq_linear_revmap(global_ehv_pic->irqhost, irq); } -static int ehv_pic_host_match(struct irq_host *h, struct device_node *node) +static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node) { /* Exact match, unless ehv_pic node is NULL */ return h->of_node == NULL || h->of_node == node; } -static int ehv_pic_host_map(struct irq_host *h, unsigned int virq, +static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct ehv_pic *ehv_pic = h->host_data; @@ -217,7 +217,7 @@ static int ehv_pic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct, +static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -248,7 +248,7 @@ static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops ehv_pic_host_ops = { +static const struct irq_domain_ops ehv_pic_host_ops = { .match = ehv_pic_host_match, .map = ehv_pic_host_map, .xlate = ehv_pic_host_xlate, @@ -275,9 +275,8 @@ void __init ehv_pic_init(void) return; } - ehv_pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, - NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0); - + ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS, + &ehv_pic_host_ops, ehv_pic); if (!ehv_pic->irqhost) { of_node_put(np); kfree(ehv_pic); @@ -293,7 +292,6 @@ void __init ehv_pic_init(void) of_node_put(np2); } - ehv_pic->irqhost->host_data = ehv_pic; ehv_pic->hc_irq = ehv_pic_irq_chip; ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity; ehv_pic->coreint_flag = coreint_flag; diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index ecb5c1946d22..0c01debe963b 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -60,7 +60,7 @@ static struct irq_chip fsl_msi_chip = { .name = "FSL-MSI", }; -static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, +static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct fsl_msi *msi_data = h->host_data; @@ -74,7 +74,7 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, return 0; } -static struct irq_host_ops fsl_msi_host_ops = { +static const struct irq_domain_ops fsl_msi_host_ops = { .map = fsl_msi_host_map, }; @@ -387,8 +387,8 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev) } platform_set_drvdata(dev, msi); - msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, - NR_MSI_IRQS, &fsl_msi_host_ops, 0); + msi->irqhost = irq_domain_add_linear(dev->dev.of_node, + NR_MSI_IRQS, 
&fsl_msi_host_ops, msi); if (msi->irqhost == NULL) { dev_err(&dev->dev, "No memory for MSI irqhost\n"); @@ -420,8 +420,6 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev) msi->feature = features->fsl_pic_ip; - msi->irqhost->host_data = msi; - /* * Remember the phandle, so that we can match with any PCI nodes * that have an "fsl,msi" property. diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h index f6c646a52541..8225f8653f78 100644 --- a/arch/powerpc/sysdev/fsl_msi.h +++ b/arch/powerpc/sysdev/fsl_msi.h @@ -26,7 +26,7 @@ #define FSL_PIC_IP_VMPIC 0x00000003 struct fsl_msi { - struct irq_host *irqhost; + struct irq_domain *irqhost; unsigned long cascade_irq; diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index d18bb27e4df9..997df6a7ab5d 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c @@ -25,7 +25,7 @@ static unsigned char cached_8259[2] = { 0xff, 0xff }; static DEFINE_RAW_SPINLOCK(i8259_lock); -static struct irq_host *i8259_host; +static struct irq_domain *i8259_host; /* * Acknowledge the IRQ using either the PCI host bridge's interrupt @@ -163,12 +163,12 @@ static struct resource pic_edgectrl_iores = { .flags = IORESOURCE_BUSY, }; -static int i8259_host_match(struct irq_host *h, struct device_node *node) +static int i8259_host_match(struct irq_domain *h, struct device_node *node) { return h->of_node == NULL || h->of_node == node; } -static int i8259_host_map(struct irq_host *h, unsigned int virq, +static int i8259_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw); @@ -185,7 +185,7 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, +static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { @@ -205,13 +205,13 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops i8259_host_ops = { +static struct irq_domain_ops i8259_host_ops = { .match = i8259_host_match, .map = i8259_host_map, .xlate = i8259_host_xlate, }; -struct irq_host *i8259_get_host(void) +struct irq_domain *i8259_get_host(void) { return i8259_host; } @@ -263,8 +263,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr) raw_spin_unlock_irqrestore(&i8259_lock, flags); /* create a legacy host */ - i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, - 0, &i8259_host_ops, 0); + i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL); if (i8259_host == NULL) { printk(KERN_ERR "i8259: failed to allocate irq host !\n"); return; diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 95da897f05a7..b50f97811c25 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -672,13 +672,13 @@ static struct irq_chip ipic_edge_irq_chip = { .irq_set_type = ipic_set_irq_type, }; -static int ipic_host_match(struct irq_host *h, struct device_node *node) +static int ipic_host_match(struct irq_domain *h, struct device_node *node) { /* Exact match, unless ipic node is NULL */ return h->of_node == NULL || h->of_node == node; } -static int ipic_host_map(struct irq_host *h, unsigned int virq, +static int ipic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct ipic *ipic = h->host_data; @@ -692,26 +692,10 @@ static int 
ipic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int ipic_host_xlate(struct irq_host *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_flags) - -{ - /* interrupt sense values coming from the device tree equal either - * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change) - */ - *out_hwirq = intspec[0]; - if (intsize > 1) - *out_flags = intspec[1]; - else - *out_flags = IRQ_TYPE_NONE; - return 0; -} - -static struct irq_host_ops ipic_host_ops = { +static struct irq_domain_ops ipic_host_ops = { .match = ipic_host_match, .map = ipic_host_map, - .xlate = ipic_host_xlate, + .xlate = irq_domain_xlate_onetwocell, }; struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) @@ -728,9 +712,8 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) if (ipic == NULL) return NULL; - ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, - NR_IPIC_INTS, - &ipic_host_ops, 0); + ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS, + &ipic_host_ops, ipic); if (ipic->irqhost == NULL) { kfree(ipic); return NULL; @@ -738,8 +721,6 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) ipic->regs = ioremap(res.start, resource_size(&res)); - ipic->irqhost->host_data = ipic; - /* init hw */ ipic_write(ipic->regs, IPIC_SICNR, 0x0); diff --git a/arch/powerpc/sysdev/ipic.h b/arch/powerpc/sysdev/ipic.h index 9391c57b0c51..90031d1282e1 100644 --- a/arch/powerpc/sysdev/ipic.h +++ b/arch/powerpc/sysdev/ipic.h @@ -43,7 +43,7 @@ struct ipic { volatile u32 __iomem *regs; /* The remapper for this IPIC */ - struct irq_host *irqhost; + struct irq_domain *irqhost; }; struct ipic_info { diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index 2ca0a85fcce9..d5f5416be310 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c @@ -17,7 +17,7 @@ extern int cpm_get_irq(struct pt_regs *regs); -static struct irq_host *mpc8xx_pic_host; +static struct irq_domain *mpc8xx_pic_host; #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; static sysconf8xx_t __iomem *siu_reg; @@ -110,7 +110,7 @@ unsigned int mpc8xx_get_irq(void) } -static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq, +static int mpc8xx_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); @@ -121,7 +121,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq, } -static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct, +static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { @@ -142,7 +142,7 @@ static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct, } -static struct irq_host_ops mpc8xx_pic_host_ops = { +static struct irq_domain_ops mpc8xx_pic_host_ops = { .map = mpc8xx_pic_host_map, .xlate = mpc8xx_pic_host_xlate, }; @@ -171,8 +171,7 @@ int mpc8xx_pic_init(void) goto out; } - mpc8xx_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, - 64, &mpc8xx_pic_host_ops, 64); + mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL); if (mpc8xx_pic_host == NULL) { printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); ret = -ENOMEM; diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 
4e9ccb1015de..c83a512fa175 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -965,13 +965,13 @@ static struct irq_chip mpic_irq_ht_chip = { #endif /* CONFIG_MPIC_U3_HT_IRQS */ -static int mpic_host_match(struct irq_host *h, struct device_node *node) +static int mpic_host_match(struct irq_domain *h, struct device_node *node) { /* Exact match, unless mpic node is NULL */ return h->of_node == NULL || h->of_node == node; } -static int mpic_host_map(struct irq_host *h, unsigned int virq, +static int mpic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct mpic *mpic = h->host_data; @@ -1041,7 +1041,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, +static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -1121,13 +1121,13 @@ static void mpic_cascade(unsigned int irq, struct irq_desc *desc) BUG_ON(!(mpic->flags & MPIC_SECONDARY)); virq = mpic_get_one_irq(mpic); - if (virq != NO_IRQ) + if (virq) generic_handle_irq(virq); chip->irq_eoi(&desc->irq_data); } -static struct irq_host_ops mpic_host_ops = { +static struct irq_domain_ops mpic_host_ops = { .match = mpic_host_match, .map = mpic_host_map, .xlate = mpic_host_xlate, @@ -1345,10 +1345,9 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); mpic->isu_mask = (1 << mpic->isu_shift) - 1; - mpic->irqhost = irq_alloc_host(mpic->node, IRQ_HOST_MAP_LINEAR, + mpic->irqhost = irq_domain_add_linear(mpic->node, isu_size ? isu_size : mpic->num_sources, - &mpic_host_ops, - flags & MPIC_LARGE_VECTORS ? 
2048 : 256); + &mpic_host_ops, mpic); /* * FIXME: The code leaks the MPIC object and mappings here; this @@ -1357,8 +1356,6 @@ struct mpic * __init mpic_alloc(struct device_node *node, if (mpic->irqhost == NULL) return NULL; - mpic->irqhost->host_data = mpic; - /* Display version */ switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) { case 1: diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c index 0f67cd79d481..0622aa91b18a 100644 --- a/arch/powerpc/sysdev/mpic_msi.c +++ b/arch/powerpc/sysdev/mpic_msi.c @@ -32,7 +32,7 @@ void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq) static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) { irq_hw_number_t hwirq; - struct irq_host_ops *ops = mpic->irqhost->ops; + const struct irq_domain_ops *ops = mpic->irqhost->ops; struct device_node *np; int flags, index, i; struct of_irq oirq; diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c index 14d130268e7a..8848e99a83f2 100644 --- a/arch/powerpc/sysdev/mv64x60_pic.c +++ b/arch/powerpc/sysdev/mv64x60_pic.c @@ -70,7 +70,7 @@ static u32 mv64x60_cached_low_mask; static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS; static u32 mv64x60_cached_gpp_mask; -static struct irq_host *mv64x60_irq_host; +static struct irq_domain *mv64x60_irq_host; /* * mv64x60_chip_low functions @@ -208,7 +208,7 @@ static struct irq_chip *mv64x60_chips[] = { [MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp, }; -static int mv64x60_host_map(struct irq_host *h, unsigned int virq, +static int mv64x60_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hwirq) { int level1; @@ -223,7 +223,7 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq, return 0; } -static struct irq_host_ops mv64x60_host_ops = { +static struct irq_domain_ops mv64x60_host_ops = { .map = mv64x60_host_map, }; @@ -250,9 +250,8 @@ void __init mv64x60_init_irq(void) paddr = of_translate_address(np, reg); mv64x60_irq_reg_base = ioremap(paddr, reg[1]); - mv64x60_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, - MV64x60_NUM_IRQS, - &mv64x60_host_ops, MV64x60_NUM_IRQS); + mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS, + &mv64x60_host_ops, NULL); spin_lock_irqsave(&mv64x60_lock, flags); out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 73034bd203c4..2fba6ef2f95e 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c @@ -245,13 +245,13 @@ static struct irq_chip qe_ic_irq_chip = { .irq_mask_ack = qe_ic_mask_irq, }; -static int qe_ic_host_match(struct irq_host *h, struct device_node *node) +static int qe_ic_host_match(struct irq_domain *h, struct device_node *node) { /* Exact match, unless qe_ic node is NULL */ return h->of_node == NULL || h->of_node == node; } -static int qe_ic_host_map(struct irq_host *h, unsigned int virq, +static int qe_ic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct qe_ic *qe_ic = h->host_data; @@ -272,23 +272,10 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct, - const u32 * intspec, unsigned int intsize, - irq_hw_number_t * out_hwirq, - unsigned int *out_flags) -{ - *out_hwirq = intspec[0]; - if (intsize > 1) - *out_flags = intspec[1]; - else - *out_flags = IRQ_TYPE_NONE; - return 0; -} - -static struct irq_host_ops qe_ic_host_ops = { +static struct irq_domain_ops 
qe_ic_host_ops = { .match = qe_ic_host_match, .map = qe_ic_host_map, - .xlate = qe_ic_host_xlate, + .xlate = irq_domain_xlate_onetwocell, }; /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ @@ -339,8 +326,8 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags, if (qe_ic == NULL) return; - qe_ic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, - NR_QE_IC_INTS, &qe_ic_host_ops, 0); + qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, + &qe_ic_host_ops, qe_ic); if (qe_ic->irqhost == NULL) { kfree(qe_ic); return; @@ -348,7 +335,6 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags, qe_ic->regs = ioremap(res.start, resource_size(&res)); - qe_ic->irqhost->host_data = qe_ic; qe_ic->hc_irq = qe_ic_irq_chip; qe_ic->virq_high = irq_of_parse_and_map(node, 0); diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h index c1361d005a8a..c327872ed35c 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.h +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h @@ -79,7 +79,7 @@ struct qe_ic { volatile u32 __iomem *regs; /* The remapper for this QEIC */ - struct irq_host *irqhost; + struct irq_domain *irqhost; /* The "linux" controller struct */ struct irq_chip hc_irq; diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 4d18658116e5..188012c58f7f 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -51,7 +51,7 @@ u32 tsi108_pci_cfg_base; static u32 tsi108_pci_cfg_phys; u32 tsi108_csr_vir_base; -static struct irq_host *pci_irq_host; +static struct irq_domain *pci_irq_host; extern u32 get_vir_csrbase(void); extern u32 tsi108_read_reg(u32 reg_offset); @@ -376,7 +376,7 @@ static struct irq_chip tsi108_pci_irq = { .irq_unmask = tsi108_pci_irq_unmask, }; -static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, +static int pci_irq_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { @@ -385,7 +385,7 @@ static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static int pci_irq_host_map(struct irq_host *h, unsigned int virq, +static int pci_irq_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { unsigned int irq; DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); @@ -397,7 +397,7 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq, return 0; } -static struct irq_host_ops pci_irq_host_ops = { +static struct irq_domain_ops pci_irq_domain_ops = { .map = pci_irq_host_map, .xlate = pci_irq_host_xlate, }; @@ -419,10 +419,9 @@ void __init tsi108_pci_int_init(struct device_node *node) { DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); - pci_irq_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, - 0, &pci_irq_host_ops, 0); + pci_irq_host = irq_domain_add_legacy_isa(node, &pci_irq_domain_ops, NULL); if (pci_irq_host == NULL) { - printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n"); + printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n"); return; } diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 063c901b1265..92033936a8f7 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c @@ -49,7 +49,7 @@ struct uic { raw_spinlock_t lock; /* The remapper for this UIC */ - struct irq_host *irqhost; + struct irq_domain *irqhost; }; static void uic_unmask_irq(struct irq_data *d) @@ -174,7 +174,7 @@ static struct irq_chip uic_irq_chip = { 
.irq_set_type = uic_set_irq_type, }; -static int uic_host_map(struct irq_host *h, unsigned int virq, +static int uic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct uic *uic = h->host_data; @@ -190,21 +190,9 @@ static int uic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int uic_host_xlate(struct irq_host *h, struct device_node *ct, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type) - -{ - /* UIC intspecs must have 2 cells */ - BUG_ON(intsize != 2); - *out_hwirq = intspec[0]; - *out_type = intspec[1]; - return 0; -} - -static struct irq_host_ops uic_host_ops = { +static struct irq_domain_ops uic_host_ops = { .map = uic_host_map, - .xlate = uic_host_xlate, + .xlate = irq_domain_xlate_twocell, }; void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) @@ -270,13 +258,11 @@ static struct uic * __init uic_init_one(struct device_node *node) } uic->dcrbase = *dcrreg; - uic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, - NR_UIC_INTS, &uic_host_ops, -1); + uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops, + uic); if (! uic->irqhost) return NULL; /* FIXME: panic? */ - uic->irqhost->host_data = uic; - /* Start with all interrupts disabled, level and non-critical */ mtdcr(uic->dcrbase + UIC_ER, 0); mtdcr(uic->dcrbase + UIC_CR, 0); diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index d72eda6a4c05..ea5e204e3450 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -40,7 +40,7 @@ unsigned int xics_interrupt_server_size = 8; DEFINE_PER_CPU(struct xics_cppr, xics_cppr); -struct irq_host *xics_host; +struct irq_domain *xics_host; static LIST_HEAD(ics_list); @@ -212,16 +212,16 @@ void xics_migrate_irqs_away(void) /* We can't set affinity on ISA interrupts */ if (virq < NUM_ISA_INTERRUPTS) continue; - if (!virq_is_host(virq, xics_host)) - continue; - irq = (unsigned int)virq_to_hw(virq); - /* We need to get IPIs still. */ - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) - continue; desc = irq_to_desc(virq); /* We only need to migrate enabled IRQS */ if (!desc || !desc->action) continue; + if (desc->irq_data.domain != xics_host) + continue; + irq = desc->irq_data.hwirq; + /* We need to get IPIs still. 
*/ + if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + continue; chip = irq_desc_get_chip(desc); if (!chip || !chip->irq_set_affinity) continue; @@ -301,7 +301,7 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, } #endif /* CONFIG_SMP */ -static int xics_host_match(struct irq_host *h, struct device_node *node) +static int xics_host_match(struct irq_domain *h, struct device_node *node) { struct ics *ics; @@ -323,7 +323,7 @@ static struct irq_chip xics_ipi_chip = { .irq_unmask = xics_ipi_unmask, }; -static int xics_host_map(struct irq_host *h, unsigned int virq, +static int xics_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct ics *ics; @@ -351,7 +351,7 @@ static int xics_host_map(struct irq_host *h, unsigned int virq, return -EINVAL; } -static int xics_host_xlate(struct irq_host *h, struct device_node *ct, +static int xics_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -366,7 +366,7 @@ static int xics_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops xics_host_ops = { +static struct irq_domain_ops xics_host_ops = { .match = xics_host_match, .map = xics_host_map, .xlate = xics_host_xlate, @@ -374,8 +374,7 @@ static struct irq_host_ops xics_host_ops = { static void __init xics_init_host(void) { - xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, - XICS_IRQ_SPURIOUS); + xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL); BUG_ON(xics_host == NULL); irq_set_default_host(xics_host); } diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 6183799754af..8d73c3c0bee6 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c @@ -40,7 +40,7 @@ #define XINTC_IVR 24 /* Interrupt Vector */ #define XINTC_MER 28 /* Master Enable */ -static struct irq_host *master_irqhost; +static struct irq_domain *master_irqhost; #define XILINX_INTC_MAXIRQS (32) @@ -141,7 +141,7 @@ static struct irq_chip xilinx_intc_edge_irqchip = { /** * xilinx_intc_xlate - translate virq# from device tree interrupts property */ -static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct, +static int xilinx_intc_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -161,7 +161,7 @@ static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static int xilinx_intc_map(struct irq_host *h, unsigned int virq, +static int xilinx_intc_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t irq) { irq_set_chip_data(virq, h->host_data); @@ -177,15 +177,15 @@ static int xilinx_intc_map(struct irq_host *h, unsigned int virq, return 0; } -static struct irq_host_ops xilinx_intc_ops = { +static struct irq_domain_ops xilinx_intc_ops = { .map = xilinx_intc_map, .xlate = xilinx_intc_xlate, }; -struct irq_host * __init +struct irq_domain * __init xilinx_intc_init(struct device_node *np) { - struct irq_host * irq; + struct irq_domain * irq; void * regs; /* Find and map the intc registers */ @@ -200,12 +200,11 @@ xilinx_intc_init(struct device_node *np) out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */ out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */ - /* Allocate and initialize an irq_host structure. 
*/ - irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, XILINX_INTC_MAXIRQS, - &xilinx_intc_ops, -1); + /* Allocate and initialize an irq_domain structure. */ + irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops, + regs); if (!irq) panic(__FILE__ ": Cannot allocate IRQ host\n"); - irq->host_data = regs; return irq; } diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 3c8db65c89e5..713fb58ca507 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -859,6 +859,7 @@ config PCI depends on SYS_SUPPORTS_PCI select PCI_DOMAINS select GENERIC_PCI_IOMAP + select NO_GENERIC_PCI_IOPORT_MAP help Find out whether you have a PCI motherboard. PCI is the name of a bus system, i.e. the way the CPU talks to the other stuff inside diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index 8f18dd090a66..1e7b0e2e764d 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c @@ -356,8 +356,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, #ifndef CONFIG_GENERIC_IOMAP -static void __iomem *ioport_map_pci(struct pci_dev *dev, - unsigned long port, unsigned int nr) +void __iomem *__pci_ioport_map(struct pci_dev *dev, + unsigned long port, unsigned int nr) { struct pci_channel *chan = dev->sysdata; diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 96657992a72e..ca5580e4d813 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -33,6 +33,7 @@ config SPARC config SPARC32 def_bool !64BIT select GENERIC_ATOMIC64 + select CLZ_TAB config SPARC64 def_bool 64BIT diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index edd3d3cde460..c28765110706 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h @@ -22,6 +22,7 @@ #include <linux/proc_fs.h> #include <linux/mutex.h> #include <linux/atomic.h> +#include <linux/irqdomain.h> #define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2 #define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 @@ -55,15 +56,6 @@ struct resource; extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name); extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size); -/* These routines are here to provide compatibility with how powerpc - * handles IRQ mapping for OF device nodes. We precompute and permanently - * register them in the platform_device objects, whereas powerpc computes them - * on request. - */ -static inline void irq_dispose_mapping(unsigned int virq) -{ -} - extern struct device_node *of_console_device; extern char *of_console_path; extern char *of_console_options; diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S index 681b3683da9e..d74bc0925f2d 100644 --- a/arch/sparc/lib/divdi3.S +++ b/arch/sparc/lib/divdi3.S @@ -17,23 +17,9 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ - .data - .align 8 - .globl __clz_tab -__clz_tab: - .byte 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 - .byte 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6 - .byte 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 - .byte 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 - .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 - .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 - .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 - .byte 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 - .size __clz_tab,256 - .global .udiv - .text .align 4 + .global .udiv .globl __divdi3 __divdi3: save %sp,-104,%sp diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5bed94e189fa..e0829a6a4660 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -398,6 +398,7 @@ config X86_INTEL_CE select X86_REBOOTFIXUPS select OF select OF_EARLY_FLATTREE + select IRQ_DOMAIN ---help--- Select for the Intel CE media processor (CE4100) SOC. This option compiles in support for the CE4100 SOC for settop @@ -2076,6 +2077,7 @@ config OLPC select GPIOLIB select OF select OF_PROMTREE + select IRQ_DOMAIN ---help--- Add support for detecting the unique features of the OLPC XO hardware. diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index 0c9fa2745f13..b3b733262909 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h @@ -145,13 +145,13 @@ extern void __add_wrong_size(void) #ifdef __HAVE_ARCH_CMPXCHG #define cmpxchg(ptr, old, new) \ - __cmpxchg((ptr), (old), (new), sizeof(*ptr)) + __cmpxchg(ptr, old, new, sizeof(*(ptr))) #define sync_cmpxchg(ptr, old, new) \ - __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) + __sync_cmpxchg(ptr, old, new, sizeof(*(ptr))) #define cmpxchg_local(ptr, old, new) \ - __cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) + __cmpxchg_local(ptr, old, new, sizeof(*(ptr))) #endif /* diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index 6919e936345b..a29571821b99 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -29,7 +29,7 @@ extern unsigned int sig_xstate_size; extern void fpu_init(void); extern void mxcsr_feature_mask_init(void); extern int init_fpu(struct task_struct *child); -extern asmlinkage void math_state_restore(void); +extern void math_state_restore(void); extern void __math_state_restore(void); extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); @@ -307,9 +307,54 @@ static inline void __clear_fpu(struct task_struct *tsk) } } +/* + * Were we in an interrupt that interrupted kernel mode? + * + * We can do a kernel_fpu_begin/end() pair *ONLY* if that + * pair does nothing at all: TS_USEDFPU must be clear (so + * that we don't try to save the FPU state), and TS must + * be set (so that the clts/stts pair does nothing that is + * visible in the interrupted kernel thread). + */ +static inline bool interrupted_kernel_fpu_idle(void) +{ + return !(current_thread_info()->status & TS_USEDFPU) && + (read_cr0() & X86_CR0_TS); +} + +/* + * Were we in user mode (or vm86 mode) when we were + * interrupted? + * + * Doing kernel_fpu_begin/end() is ok if we are running + * in an interrupt context from user mode - we'll just + * save the FPU state as required. 
+ */ +static inline bool interrupted_user_mode(void) +{ + struct pt_regs *regs = get_irq_regs(); + return regs && user_mode_vm(regs); +} + +/* + * Can we use the FPU in kernel mode with the + * whole "kernel_fpu_begin/end()" sequence? + * + * It's always ok in process context (ie "not interrupt") + * but it is sometimes ok even from an irq. + */ +static inline bool irq_fpu_usable(void) +{ + return !in_interrupt() || + interrupted_user_mode() || + interrupted_kernel_fpu_idle(); +} + static inline void kernel_fpu_begin(void) { struct thread_info *me = current_thread_info(); + + WARN_ON_ONCE(!irq_fpu_usable()); preempt_disable(); if (me->status & TS_USEDFPU) __save_init_fpu(me->task); @@ -323,14 +368,6 @@ static inline void kernel_fpu_end(void) preempt_enable(); } -static inline bool irq_fpu_usable(void) -{ - struct pt_regs *regs; - - return !in_interrupt() || !(regs = get_irq_regs()) || \ - user_mode(regs) || (read_cr0() & X86_CR0_TS); -} - /* * Some instructions like VIA's padlock instructions generate a spurious * DNA fault but don't modify SSE registers. And these instructions @@ -367,6 +404,7 @@ static inline void irq_ts_restore(int TS_state) */ static inline void save_init_fpu(struct task_struct *tsk) { + WARN_ON_ONCE(task_thread_info(tsk)->status & TS_USEDFPU); preempt_disable(); __save_init_fpu(tsk); stts(); diff --git a/arch/x86/include/asm/irq_controller.h b/arch/x86/include/asm/irq_controller.h deleted file mode 100644 index 423bbbddf36d..000000000000 --- a/arch/x86/include/asm/irq_controller.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __IRQ_CONTROLLER__ -#define __IRQ_CONTROLLER__ - -struct irq_domain { - int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize, - u32 *out_hwirq, u32 *out_type); - void *priv; - struct device_node *controller; - struct list_head l; -}; - -#endif diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index ab4092e3214e..7b9cfc4878af 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -190,6 +190,9 @@ struct x86_emulate_ops { int (*intercept)(struct x86_emulate_ctxt *ctxt, struct x86_instruction_info *info, enum x86_intercept_stage stage); + + bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, + u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); }; typedef u32 __attribute__((vector_size(16))) sse128_t; @@ -298,6 +301,19 @@ struct x86_emulate_ctxt { #define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \ X86EMUL_MODE_PROT64) +/* CPUID vendors */ +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 + +#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41 +#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574 +#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273 + +#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547 +#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e +#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69 + enum x86_intercept_stage { X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */ X86_ICPT_PRE_EXCEPT, diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h index 644dd885f05a..60bef663609a 100644 --- a/arch/x86/include/asm/prom.h +++ b/arch/x86/include/asm/prom.h @@ -21,7 +21,6 @@ #include <asm/irq.h> #include <linux/atomic.h> #include <asm/setup.h> -#include <asm/irq_controller.h> #ifdef CONFIG_OF extern int of_ioapic; @@ -43,15 +42,6 @@ extern char cmd_line[COMMAND_LINE_SIZE]; #define 
pci_address_to_pio pci_address_to_pio unsigned long pci_address_to_pio(phys_addr_t addr); -/** - * irq_dispose_mapping - Unmap an interrupt - * @virq: linux virq number of the interrupt to unmap - * - * FIXME: We really should implement proper virq handling like power, - * but that's going to be major surgery. - */ -static inline void irq_dispose_mapping(unsigned int virq) { } - #define HAVE_ARCH_DEVTREE_FIXUPS #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 73da6b64f5b7..d6bd49faa40c 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -439,7 +439,6 @@ void intel_pmu_pebs_enable(struct perf_event *event) hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; cpuc->pebs_enabled |= 1ULL << hwc->idx; - WARN_ON_ONCE(cpuc->enabled); if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) intel_pmu_lbr_enable(event); diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 3fab3de3ce96..47a7e63bfe54 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -72,8 +72,6 @@ void intel_pmu_lbr_enable(struct perf_event *event) if (!x86_pmu.lbr_nr) return; - WARN_ON_ONCE(cpuc->enabled); - /* * Reset the LBR stack if we changed task context to * avoid data leaks. diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 52821799a702..3ae2ced4a874 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -4,6 +4,7 @@ #include <linux/bootmem.h> #include <linux/export.h> #include <linux/io.h> +#include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/of.h> @@ -17,64 +18,14 @@ #include <linux/initrd.h> #include <asm/hpet.h> -#include <asm/irq_controller.h> #include <asm/apic.h> #include <asm/pci_x86.h> __initdata u64 initial_dtb; char __initdata cmd_line[COMMAND_LINE_SIZE]; -static LIST_HEAD(irq_domains); -static DEFINE_RAW_SPINLOCK(big_irq_lock); int __initdata of_ioapic; -#ifdef CONFIG_X86_IO_APIC -static void add_interrupt_host(struct irq_domain *ih) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&big_irq_lock, flags); - list_add(&ih->l, &irq_domains); - raw_spin_unlock_irqrestore(&big_irq_lock, flags); -} -#endif - -static struct irq_domain *get_ih_from_node(struct device_node *controller) -{ - struct irq_domain *ih, *found = NULL; - unsigned long flags; - - raw_spin_lock_irqsave(&big_irq_lock, flags); - list_for_each_entry(ih, &irq_domains, l) { - if (ih->controller == controller) { - found = ih; - break; - } - } - raw_spin_unlock_irqrestore(&big_irq_lock, flags); - return found; -} - -unsigned int irq_create_of_mapping(struct device_node *controller, - const u32 *intspec, unsigned int intsize) -{ - struct irq_domain *ih; - u32 virq, type; - int ret; - - ih = get_ih_from_node(controller); - if (!ih) - return 0; - ret = ih->xlate(ih, intspec, intsize, &virq, &type); - if (ret) - return 0; - if (type == IRQ_TYPE_NONE) - return virq; - irq_set_irq_type(virq, type); - return virq; -} -EXPORT_SYMBOL_GPL(irq_create_of_mapping); - unsigned long pci_address_to_pio(phys_addr_t address) { /* @@ -354,36 +305,43 @@ static struct of_ioapic_type of_ioapic_type[] = }, }; -static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize, - u32 *out_hwirq, u32 *out_type) +static int ioapic_xlate(struct irq_domain *domain, + struct device_node *controller, + const u32 *intspec, u32 intsize, 
+ irq_hw_number_t *out_hwirq, u32 *out_type) { - struct mp_ioapic_gsi *gsi_cfg; struct io_apic_irq_attr attr; struct of_ioapic_type *it; - u32 line, idx, type; + u32 line, idx; + int rc; - if (intsize < 2) + if (WARN_ON(intsize < 2)) return -EINVAL; - line = *intspec; - idx = (u32) id->priv; - gsi_cfg = mp_ioapic_gsi_routing(idx); - *out_hwirq = line + gsi_cfg->gsi_base; - - intspec++; - type = *intspec; + line = intspec[0]; - if (type >= ARRAY_SIZE(of_ioapic_type)) + if (intspec[1] >= ARRAY_SIZE(of_ioapic_type)) return -EINVAL; - it = of_ioapic_type + type; - *out_type = it->out_type; + it = &of_ioapic_type[intspec[1]]; + idx = (u32) domain->host_data; set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity); - return io_apic_setup_irq_pin_once(*out_hwirq, cpu_to_node(0), &attr); + rc = io_apic_setup_irq_pin_once(irq_find_mapping(domain, line), + cpu_to_node(0), &attr); + if (rc) + return rc; + + *out_hwirq = line; + *out_type = it->out_type; + return 0; } +const struct irq_domain_ops ioapic_irq_domain_ops = { + .xlate = ioapic_xlate, +}; + static void __init ioapic_add_ofnode(struct device_node *np) { struct resource r; @@ -399,13 +357,14 @@ static void __init ioapic_add_ofnode(struct device_node *np) for (i = 0; i < nr_ioapics; i++) { if (r.start == mpc_ioapic_addr(i)) { struct irq_domain *id; + struct mp_ioapic_gsi *gsi_cfg; + + gsi_cfg = mp_ioapic_gsi_routing(i); - id = kzalloc(sizeof(*id), GFP_KERNEL); + id = irq_domain_add_legacy(np, 32, gsi_cfg->gsi_base, 0, + &ioapic_irq_domain_ops, + (void*)i); BUG_ON(!id); - id->controller = np; - id->xlate = ioapic_xlate; - id->priv = (void *)i; - add_interrupt_host(id); return; } } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 1aae78f775fc..4025fe4f928f 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -252,7 +252,8 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) unsigned short ss; unsigned long sp; #endif - printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); + printk(KERN_DEFAULT + "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); #ifdef CONFIG_PREEMPT printk("PREEMPT "); #endif diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 6d728d9284bd..17107bd6e1f0 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -129,7 +129,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, if (!stack) { if (regs) stack = (unsigned long *)regs->sp; - else if (task && task != current) + else if (task != current) stack = (unsigned long *)task->thread.sp; else stack = &dummy; @@ -269,11 +269,11 @@ void show_registers(struct pt_regs *regs) unsigned char c; u8 *ip; - printk(KERN_EMERG "Stack:\n"); + printk(KERN_DEFAULT "Stack:\n"); show_stack_log_lvl(NULL, regs, (unsigned long *)sp, - 0, KERN_EMERG); + 0, KERN_DEFAULT); - printk(KERN_EMERG "Code: "); + printk(KERN_DEFAULT "Code: "); ip = (u8 *)regs->ip - code_prologue; if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 37a458b521a6..d840e69a853c 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -39,6 +39,14 @@ static int reboot_mode; enum reboot_type reboot_type = BOOT_ACPI; int reboot_force; +/* This variable is used privately to keep track of whether or not + * reboot_type is still set to its default value (i.e., reboot= hasn't + * been set on the command line). 
This is needed so that we can + * suppress DMI scanning for reboot quirks. Without it, it's + * impossible to override a faulty reboot quirk without recompiling. + */ +static int reboot_default = 1; + #if defined(CONFIG_X86_32) && defined(CONFIG_SMP) static int reboot_cpu = -1; #endif @@ -67,6 +75,12 @@ bool port_cf9_safe = false; static int __init reboot_setup(char *str) { for (;;) { + /* Having anything passed on the command line via + * reboot= will cause us to disable DMI checking + * below. + */ + reboot_default = 0; + switch (*str) { case 'w': reboot_mode = 0x1234; @@ -295,14 +309,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "P4S800"), }, }, - { /* Handle problems with rebooting on VersaLogic Menlow boards */ - .callback = set_bios_reboot, - .ident = "VersaLogic Menlow based board", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"), - DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"), - }, - }, { /* Handle reboot issue on Acer Aspire one */ .callback = set_kbd_reboot, .ident = "Acer Aspire One A110", @@ -316,7 +322,12 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { static int __init reboot_init(void) { - dmi_check_system(reboot_dmi_table); + /* Only do the DMI check if reboot_type hasn't been overridden + * on the command line + */ + if (reboot_default) { + dmi_check_system(reboot_dmi_table); + } return 0; } core_initcall(reboot_init); @@ -465,7 +476,12 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { static int __init pci_reboot_init(void) { - dmi_check_system(pci_reboot_dmi_table); + /* Only do the DMI check if reboot_type hasn't been overridden + * on the command line + */ + if (reboot_default) { + dmi_check_system(pci_reboot_dmi_table); + } return 0; } core_initcall(pci_reboot_init); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 482ec3af2067..8ba27dbc107a 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -599,10 +599,10 @@ void __math_state_restore(void) * Careful.. There are problems with IBM-designed IRQ13 behaviour. * Don't touch unless you *really* know how it works. * - * Must be called with kernel preemption disabled (in this case, - * local interrupts are disabled at the call-site in entry.S). + * Must be called with kernel preemption disabled (eg with local + * local interrupts as in the case of do_device_not_available). */ -asmlinkage void math_state_restore(void) +void math_state_restore(void) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = thread->task; @@ -631,6 +631,7 @@ EXPORT_SYMBOL_GPL(math_state_restore); dotraplinkage void __kprobes do_device_not_available(struct pt_regs *regs, long error_code) { + WARN_ON_ONCE(!user_mode_vm(regs)); #ifdef CONFIG_MATH_EMULATION if (read_cr0() & X86_CR0_EM) { struct math_emu_info info = { }; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 05a562b85025..0982507b962a 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1891,6 +1891,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, ss->p = 1; } +static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) +{ + struct x86_emulate_ops *ops = ctxt->ops; + u32 eax, ebx, ecx, edx; + + /* + * syscall should always be enabled in longmode - so only become + * vendor specific (cpuid) if other modes are active... 
+ */ + if (ctxt->mode == X86EMUL_MODE_PROT64) + return true; + + eax = 0x00000000; + ecx = 0x00000000; + if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) { + /* + * Intel ("GenuineIntel") + * remark: Intel CPUs only support "syscall" in 64bit + * longmode. Also an 64bit guest with a + * 32bit compat-app running will #UD !! While this + * behaviour can be fixed (by emulating) into AMD + * response - CPUs of AMD can't behave like Intel. + */ + if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && + ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && + edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) + return false; + + /* AMD ("AuthenticAMD") */ + if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && + ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && + edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) + return true; + + /* AMD ("AMDisbetter!") */ + if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && + ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && + edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) + return true; + } + + /* default: (not Intel, not AMD), apply Intel's stricter rules... */ + return false; +} + static int em_syscall(struct x86_emulate_ctxt *ctxt) { struct x86_emulate_ops *ops = ctxt->ops; @@ -1904,9 +1949,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) ctxt->mode == X86EMUL_MODE_VM86) return emulate_ud(ctxt); + if (!(em_syscall_is_enabled(ctxt))) + return emulate_ud(ctxt); + ops->get_msr(ctxt, MSR_EFER, &efer); setup_syscalls_segments(ctxt, &cs, &ss); + if (!(efer & EFER_SCE)) + return emulate_ud(ctxt); + ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 14d6cadc4ba6..9cbfc0698118 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1495,6 +1495,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu) int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) { + bool pr = false; + switch (msr) { case MSR_EFER: return set_efer(vcpu, data); @@ -1635,6 +1637,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; + case MSR_P6_PERFCTR0: + case MSR_P6_PERFCTR1: + pr = true; + case MSR_P6_EVNTSEL0: + case MSR_P6_EVNTSEL1: + if (kvm_pmu_msr(vcpu, msr)) + return kvm_pmu_set_msr(vcpu, msr, data); + + if (pr || data != 0) + pr_unimpl(vcpu, "disabled perfctr wrmsr: " + "0x%x data 0x%llx\n", msr, data); + break; case MSR_K7_CLK_CTL: /* * Ignore all writes to this no longer documented MSR. 
@@ -1835,6 +1849,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) case MSR_FAM10H_MMIO_CONF_BASE: data = 0; break; + case MSR_P6_PERFCTR0: + case MSR_P6_PERFCTR1: + case MSR_P6_EVNTSEL0: + case MSR_P6_EVNTSEL1: + if (kvm_pmu_msr(vcpu, msr)) + return kvm_pmu_get_msr(vcpu, msr, pdata); + data = 0; + break; case MSR_IA32_UCODE_REV: data = 0x100000000ULL; break; @@ -4180,6 +4202,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); } +static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, + u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) +{ + struct kvm_cpuid_entry2 *cpuid = NULL; + + if (eax && ecx) + cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt), + *eax, *ecx); + + if (cpuid) { + *eax = cpuid->eax; + *ecx = cpuid->ecx; + if (ebx) + *ebx = cpuid->ebx; + if (edx) + *edx = cpuid->edx; + return true; + } + + return false; +} + static struct x86_emulate_ops emulate_ops = { .read_std = kvm_read_guest_virt_system, .write_std = kvm_write_guest_virt_system, @@ -4211,6 +4255,7 @@ static struct x86_emulate_ops emulate_ops = { .get_fpu = emulator_get_fpu, .put_fpu = emulator_put_fpu, .intercept = emulator_intercept, + .get_cpuid = emulator_get_cpuid, }; static void cache_all_regs(struct kvm_vcpu *vcpu) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9d74824a708d..f0b4caf85c1a 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -673,7 +673,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, stackend = end_of_stack(tsk); if (tsk != &init_task && *stackend != STACK_END_MAGIC) - printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); + printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); tsk->thread.cr2 = address; tsk->thread.trap_no = 14; @@ -684,7 +684,7 @@ no_context(struct pt_regs *regs, unsigned long error_code, sig = 0; /* Executive summary in case the body of the oops scrolled away */ - printk(KERN_EMERG "CR2: %016lx\n", address); + printk(KERN_DEFAULT "CR2: %016lx\n", address); oops_end(flags, regs, sig); } diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h index 5fb8c27cbef5..405a8c49ff2c 100644 --- a/arch/xtensa/include/asm/string.h +++ b/arch/xtensa/include/asm/string.h @@ -118,7 +118,4 @@ extern void *memmove(void *__dest, __const__ void *__src, size_t __n); /* Don't build bcopy at all ... */ #define __HAVE_ARCH_BCOPY -#define __HAVE_ARCH_MEMSCAN -#define memscan memchr - #endif /* _XTENSA_STRING_H */ |
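
The x86 devicetree/ioapic conversion above follows the common irqdomain pattern: drop the private controller list and hand-rolled irq_create_of_mapping(), register a legacy domain sized to the controller, translate device-tree interrupt specifiers in an .xlate hook, set up each interrupt in a .map hook, and resolve hardware numbers through irq_find_mapping() instead of applying per-controller offsets by hand. Below is a minimal illustrative sketch of that pattern, not part of this merge; the foo_* names, the two-cell specifier layout and the 32-interrupt size are invented for the example, and a real driver would also fill in mask/unmask/eoi callbacks in its irq_chip.

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_chip foo_irq_chip = {
	.name = "FOO",		/* real hardware needs mask/unmask/eoi ops here */
};

/* Called by the irqdomain core for each hwirq when its mapping is created,
 * replacing an open-coded loop over every interrupt at init time. */
static int foo_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

/* Translate a two-cell DT specifier: cell 0 is the hwirq, cell 1 the trigger. */
static int foo_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller || intsize < 2)
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

static const struct irq_domain_ops foo_irq_domain_ops = {
	.map	= foo_irq_domain_map,
	.xlate	= foo_irq_domain_xlate,
};

static struct irq_domain *foo_domain;

static void __init foo_irq_init(struct device_node *np, unsigned int irq_base)
{
	/* 32 hwirqs backed by pre-allocated Linux irqs starting at irq_base,
	 * with hwirq 0 mapping to irq_base (legacy, fixed numbering). */
	foo_domain = irq_domain_add_legacy(np, 32, irq_base, 0,
					   &foo_irq_domain_ops, NULL);
	BUG_ON(!foo_domain);
}

/* Flow-handler path: hardware numbers are resolved through the domain. */
static void foo_handle_hwirq(unsigned int hwirq)
{
	generic_handle_irq(irq_find_mapping(foo_domain, hwirq));
}

A legacy domain is shown because the converted controller above still depends on a fixed block of pre-assigned Linux irq numbers (the ioapic's GSI base); a controller without that constraint could register a linear domain instead and let mappings be created on demand.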