author    | Dave Airlie <airlied@redhat.com> | 2015-04-20 11:32:26 +1000
committer | Dave Airlie <airlied@redhat.com> | 2015-04-20 13:05:20 +1000
commit    | 2c33ce009ca2389dbf0535d0672214d09738e35e (patch)
tree      | 6186a6458c3c160385d794a23eaf07c786a9e61b /arch/sparc
parent    | cec32a47010647e8b0603726ebb75b990a4057a4 (diff)
parent    | 09d51602cf84a1264946711dd4ea0dddbac599a1 (diff)
download  | linux-stable-2c33ce009ca2389dbf0535d0672214d09738e35e.tar.gz
          | linux-stable-2c33ce009ca2389dbf0535d0672214d09738e35e.tar.bz2
          | linux-stable-2c33ce009ca2389dbf0535d0672214d09738e35e.zip
Merge Linus master into drm-next
The merge is clean, but the ARM build fails afterwards due to
API changes in the regulator tree. I've included the patch in
the merge to fix the build.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                       |   4
-rw-r--r--  arch/sparc/include/asm/iommu_64.h        |   7
-rw-r--r--  arch/sparc/include/asm/jump_label.h      |   5
-rw-r--r--  arch/sparc/include/asm/seccomp.h         |  11
-rw-r--r--  arch/sparc/include/asm/thread_info_32.h  |  15
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h  |  26
-rw-r--r--  arch/sparc/kernel/iommu.c                | 172
-rw-r--r--  arch/sparc/kernel/iommu_common.h         |   8
-rw-r--r--  arch/sparc/kernel/ldc.c                  | 153
-rw-r--r--  arch/sparc/kernel/leon_pci.c             |  16
-rw-r--r--  arch/sparc/kernel/mdesc.c                |  22
-rw-r--r--  arch/sparc/kernel/pci.c                  |   8
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c            | 183
-rw-r--r--  arch/sparc/kernel/pcic.c                 |   4
-rw-r--r--  arch/sparc/kernel/time_32.c              |   6
-rw-r--r--  arch/sparc/kernel/traps_32.c             |   1
-rw-r--r--  arch/sparc/kernel/traps_64.c             |   2
17 files changed, 247 insertions, 396 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index efb00ec75805..e49502acbab4 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -146,6 +146,10 @@ config GENERIC_ISA_DMA
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y if SPARC64
 
+config PGTABLE_LEVELS
+	default 4 if 64BIT
+	default 3
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index 2b9321ab064d..cd0d69fa7592 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -16,6 +16,7 @@
 #define IOPTE_WRITE   0x0000000000000002UL
 
 #define IOMMU_NUM_CTXS	4096
+#include <linux/iommu-common.h>
 
 struct iommu_arena {
 	unsigned long	*map;
@@ -24,11 +25,10 @@ struct iommu_arena {
 };
 
 struct iommu {
+	struct iommu_map_table	tbl;
 	spinlock_t		lock;
-	struct iommu_arena	arena;
-	void			(*flush_all)(struct iommu *);
+	u32			dma_addr_mask;
 	iopte_t			*page_table;
-	u32			page_table_map_base;
 	unsigned long		iommu_control;
 	unsigned long		iommu_tsbbase;
 	unsigned long		iommu_flush;
@@ -40,7 +40,6 @@ struct iommu {
 	unsigned long		dummy_page_pa;
 	unsigned long		ctx_lowest_free;
 	DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS);
-	u32			dma_addr_mask;
 };
 
 struct strbuf {
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index ec2e2e2aba7d..cc9b04a2b11b 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_SPARC_JUMP_LABEL_H
 #define _ASM_SPARC_JUMP_LABEL_H
 
-#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
 
 #include <linux/types.h>
 
@@ -22,8 +22,6 @@ l_yes:
 	return true;
 }
 
-#endif /* __KERNEL__ */
-
 typedef u32 jump_label_t;
 
 struct jump_entry {
@@ -32,4 +30,5 @@ struct jump_entry {
 	jump_label_t key;
 };
 
+#endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/sparc/include/asm/seccomp.h b/arch/sparc/include/asm/seccomp.h
index adca1bce41d4..5ef8826d44f8 100644
--- a/arch/sparc/include/asm/seccomp.h
+++ b/arch/sparc/include/asm/seccomp.h
@@ -1,15 +1,10 @@
 #ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
 
 #include <linux/unistd.h>
 
-#define __NR_seccomp_read __NR_read
-#define __NR_seccomp_write __NR_write
-#define __NR_seccomp_exit __NR_exit
-#define __NR_seccomp_sigreturn __NR_rt_sigreturn
-
-#define __NR_seccomp_read_32 __NR_read
-#define __NR_seccomp_write_32 __NR_write
-#define __NR_seccomp_exit_32 __NR_exit
 #define __NR_seccomp_sigreturn_32 __NR_sigreturn
 
+#include <asm-generic/seccomp.h>
+
 #endif /* _ASM_SECCOMP_H */
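The jump_label.h hunk above swaps the __KERNEL__ guard for __ASSEMBLY__ so the header's constants survive inclusion from assembly files while its C declarations are compiled out. A minimal sketch of that guard pattern, using a hypothetical header rather than anything from this tree:

    #ifndef _EXAMPLE_H
    #define _EXAMPLE_H

    /* Plain #defines are visible to both C and assembly. */
    #define EXAMPLE_ENTRY_SIZE	12

    #ifndef __ASSEMBLY__
    /* C-only material: types and inline functions would not get
     * past the assembler, so they are fenced off. */
    struct example_entry {
    	unsigned int code;
    	unsigned int target;
    	unsigned int key;
    };
    #endif /* __ASSEMBLY__ */

    #endif /* _EXAMPLE_H */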
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index fd7bd0a440ca..229475f0d7ce 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -27,7 +27,6 @@ struct thread_info {
 	unsigned long		uwinmask;
 	struct task_struct	*task;		/* main task structure */
-	struct exec_domain	*exec_domain;	/* execution domain */
 	unsigned long		flags;		/* low level flags */
 	int			cpu;		/* cpu we're on */
 	int			preempt_count;	/* 0 => preemptable,
@@ -35,6 +34,8 @@ struct thread_info {
 	int			softirq_count;
 	int			hardirq_count;
 
+	u32			__unused;
+
 	/* Context switch saved kernel state. */
 	unsigned long ksp;	/* ... ksp __attribute__ ((aligned (8))); */
 	unsigned long kpc;
@@ -56,7 +57,6 @@ struct thread_info {
 {						\
 	.uwinmask	=	0,		\
 	.task		=	&tsk,		\
-	.exec_domain	=	&default_exec_domain,	\
 	.flags		=	0,		\
 	.cpu		=	0,		\
 	.preempt_count	=	INIT_PREEMPT_COUNT,	\
@@ -85,12 +85,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
  */
#define TI_UWINMASK	0x00	/* uwinmask */
 #define TI_TASK		0x04
-#define TI_EXECDOMAIN	0x08	/* exec_domain */
-#define TI_FLAGS	0x0c
-#define TI_CPU		0x10
-#define TI_PREEMPT	0x14	/* preempt_count */
-#define TI_SOFTIRQ	0x18	/* softirq_count */
-#define TI_HARDIRQ	0x1c	/* hardirq_count */
+#define TI_FLAGS	0x08
+#define TI_CPU		0x0c
+#define TI_PREEMPT	0x10	/* preempt_count */
+#define TI_SOFTIRQ	0x14	/* softirq_count */
+#define TI_HARDIRQ	0x18	/* hardirq_count */
 #define TI_KSP		0x20	/* ksp */
 #define TI_KPC		0x24	/* kpc (ldd'ed with kpc) */
 #define TI_KPSR		0x28	/* kpsr */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index ff455164732a..bde59825d06c 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -31,7 +31,6 @@
 #include <asm/types.h>
 
 struct task_struct;
-struct exec_domain;
 
 struct thread_info {
 	/* D$ line 1 */
@@ -44,7 +43,6 @@ struct thread_info {
 	/* D$ line 2 */
 	unsigned long		fault_address;
 	struct pt_regs		*kregs;
-	struct exec_domain	*exec_domain;
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	__u8			new_child;
 	__u8			current_ds;
@@ -80,18 +78,17 @@ struct thread_info {
 #define TI_KSP		0x00000018
 #define TI_FAULT_ADDR	0x00000020
 #define TI_KREGS	0x00000028
-#define TI_EXEC_DOMAIN	0x00000030
-#define TI_PRE_COUNT	0x00000038
-#define TI_NEW_CHILD	0x0000003c
-#define TI_CURRENT_DS	0x0000003d
-#define TI_CPU		0x0000003e
-#define TI_UTRAPS	0x00000040
-#define TI_REG_WINDOW	0x00000048
-#define TI_RWIN_SPTRS	0x000003c8
-#define TI_GSR		0x00000400
-#define TI_XFSR		0x00000438
-#define TI_KUNA_REGS	0x00000470
-#define TI_KUNA_INSN	0x00000478
+#define TI_PRE_COUNT	0x00000030
+#define TI_NEW_CHILD	0x00000034
+#define TI_CURRENT_DS	0x00000035
+#define TI_CPU		0x00000036
+#define TI_UTRAPS	0x00000038
+#define TI_REG_WINDOW	0x00000040
+#define TI_RWIN_SPTRS	0x000003c0
+#define TI_GSR		0x000003f8
+#define TI_XFSR		0x00000430
+#define TI_KUNA_REGS	0x00000468
+#define TI_KUNA_INSN	0x00000470
 #define TI_FPREGS	0x00000480
 
 /* We embed this in the uppermost byte of thread_info->flags */
@@ -119,7 +116,6 @@ struct thread_info {
 {						\
 	.task		=	&tsk,		\
 	.current_ds	=	ASI_P,		\
-	.exec_domain	=	&default_exec_domain,	\
 	.preempt_count	=	INIT_PREEMPT_COUNT,	\
 }
 
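Both thread_info hunks delete exec_domain from the middle of the structure, so every hand-coded TI_* offset that assembly code relies on is renumbered to match (the 32-bit variant also gains a u32 pad to keep ksp 8-byte aligned). trap_init() cross-checks these constants against offsetof() at boot, as the traps_32.c and traps_64.c hunks at the end of this diff show. A compile-time sketch of the same kind of check, with a hypothetical struct and a 32-bit ABI assumed (build with -m32):

    #include <stddef.h>

    /* Hypothetical mirror of the start of the 32-bit layout after
     * the change; not the real struct thread_info. */
    struct ti_example {
    	unsigned long	uwinmask;	/* 0x00 */
    	void		*task;		/* 0x04 */
    	unsigned long	flags;		/* 0x08 (was 0x0c) */
    	int		cpu;		/* 0x0c (was 0x10) */
    };

    /* Fails the build if the hand-coded asm offset drifts. */
    _Static_assert(offsetof(struct ti_example, flags) == 0x08,
    	       "TI_FLAGS out of sync with struct layout");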
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index bfa4d0c2df42..5320689c06e9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 #include <linux/iommu-helper.h>
 #include <linux/bitmap.h>
+#include <linux/iommu-common.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -45,8 +46,9 @@
 			       "i" (ASI_PHYS_BYPASS_EC_E))
 
 /* Must be invoked under the IOMMU lock. */
-static void iommu_flushall(struct iommu *iommu)
+static void iommu_flushall(struct iommu_map_table *iommu_map_table)
 {
+	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
 	if (iommu->iommu_flushinv) {
 		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 	} else {
@@ -87,94 +89,6 @@ static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 	iopte_val(*iopte) = val;
 }
 
-/* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
- * facility it must all be done in one pass while under the iommu lock.
- *
- * On sun4u platforms, we only flush the IOMMU once every time we've passed
- * over the entire page table doing allocations. Therefore we only ever advance
- * the hint and cannot backtrack it.
- */
-unsigned long iommu_range_alloc(struct device *dev,
-				struct iommu *iommu,
-				unsigned long npages,
-				unsigned long *handle)
-{
-	unsigned long n, end, start, limit, boundary_size;
-	struct iommu_arena *arena = &iommu->arena;
-	int pass = 0;
-
-	/* This allocator was derived from x86_64's bit string search */
-
-	/* Sanity check */
-	if (unlikely(npages == 0)) {
-		if (printk_ratelimit())
-			WARN_ON(1);
-		return DMA_ERROR_CODE;
-	}
-
-	if (handle && *handle)
-		start = *handle;
-	else
-		start = arena->hint;
-
-	limit = arena->limit;
-
-	/* The case below can happen if we have a small segment appended
-	 * to a large, or when the previous alloc was at the very end of
-	 * the available space. If so, go back to the beginning and flush.
-	 */
-	if (start >= limit) {
-		start = 0;
-		if (iommu->flush_all)
-			iommu->flush_all(iommu);
-	}
-
- again:
-
-	if (dev)
-		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << IO_PAGE_SHIFT);
-	else
-		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
-
-	n = iommu_area_alloc(arena->map, limit, start, npages,
-			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
-			     boundary_size >> IO_PAGE_SHIFT, 0);
-	if (n == -1) {
-		if (likely(pass < 1)) {
-			/* First failure, rescan from the beginning. */
-			start = 0;
-			if (iommu->flush_all)
-				iommu->flush_all(iommu);
-			pass++;
-			goto again;
-		} else {
-			/* Second failure, give up */
-			return DMA_ERROR_CODE;
-		}
-	}
-
-	end = n + npages;
-
-	arena->hint = end;
-
-	/* Update handle for SG allocations */
-	if (handle)
-		*handle = end;
-
-	return n;
-}
-
-void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
-{
-	struct iommu_arena *arena = &iommu->arena;
-	unsigned long entry;
-
-	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-
-	bitmap_clear(arena->map, entry, npages);
-}
-
 int iommu_table_init(struct iommu *iommu, int tsbsize,
 		     u32 dma_offset, u32 dma_addr_mask,
 		     int numa_node)
@@ -187,22 +101,20 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
 	iommu->ctx_lowest_free = 1;
-	iommu->page_table_map_base = dma_offset;
+	iommu->tbl.table_map_base = dma_offset;
 	iommu->dma_addr_mask = dma_addr_mask;
 
 	/* Allocate and initialize the free area map. */
 	sz = num_tsb_entries / 8;
 	sz = (sz + 7UL) & ~7UL;
-	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
-	if (!iommu->arena.map) {
-		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
+	iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
+	if (!iommu->tbl.map)
 		return -ENOMEM;
-	}
-	memset(iommu->arena.map, 0, sz);
-	iommu->arena.limit = num_tsb_entries;
+	memset(iommu->tbl.map, 0, sz);
 
-	if (tlb_type != hypervisor)
-		iommu->flush_all = iommu_flushall;
+	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+			    (tlb_type != hypervisor ? iommu_flushall : NULL),
+			    false, 1, false);
 
 	/* Allocate and initialize the dummy page which we
 	 * set inactive IO PTEs to point to.
@@ -235,18 +147,20 @@ out_free_dummy_page:
 	iommu->dummy_page = 0UL;
 
 out_free_map:
-	kfree(iommu->arena.map);
-	iommu->arena.map = NULL;
+	kfree(iommu->tbl.map);
+	iommu->tbl.map = NULL;
 
 	return -ENOMEM;
 }
 
-static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
+static inline iopte_t *alloc_npages(struct device *dev,
+				    struct iommu *iommu,
 				    unsigned long npages)
 {
 	unsigned long entry;
 
-	entry = iommu_range_alloc(dev, iommu, npages, NULL);
+	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+				      (unsigned long)(-1), 0);
 	if (unlikely(entry == DMA_ERROR_CODE))
 		return NULL;
 
@@ -284,7 +198,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
 				   dma_addr_t *dma_addrp, gfp_t gfp,
 				   struct dma_attrs *attrs)
 {
-	unsigned long flags, order, first_page;
+	unsigned long order, first_page;
 	struct iommu *iommu;
 	struct page *page;
 	int npages, nid;
@@ -306,16 +220,14 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
 
 	iommu = dev->archdata.iommu;
 
-	spin_lock_irqsave(&iommu->lock, flags);
 	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
-	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (unlikely(iopte == NULL)) {
 		free_pages(first_page, order);
 		return NULL;
 	}
 
-	*dma_addrp = (iommu->page_table_map_base +
+	*dma_addrp = (iommu->tbl.table_map_base +
 		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	npages = size >> IO_PAGE_SHIFT;
@@ -336,16 +248,12 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 				 struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
-	unsigned long flags, order, npages;
+	unsigned long order, npages;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
 
-	spin_lock_irqsave(&iommu->lock, flags);
-
-	iommu_range_free(iommu, dvma, npages);
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
 
 	order = get_order(size);
 	if (order < 10)
@@ -375,8 +283,8 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
-	spin_lock_irqsave(&iommu->lock, flags);
 	base = alloc_npages(dev, iommu, npages);
+	spin_lock_irqsave(&iommu->lock, flags);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
 		ctx = iommu_alloc_ctx(iommu);
@@ -385,7 +293,7 @@ static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
 	if (unlikely(!base))
 		goto bad;
 
-	bus_addr = (iommu->page_table_map_base +
+	bus_addr = (iommu->tbl.table_map_base +
 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
@@ -496,7 +404,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	base = iommu->page_table +
-		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
 	bus_addr &= IO_PAGE_MASK;
 
 	spin_lock_irqsave(&iommu->lock, flags);
@@ -515,11 +423,10 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	for (i = 0; i < npages; i++)
 		iopte_make_dummy(iommu, base + i);
 
-	iommu_range_free(iommu, bus_addr, npages);
-
 	iommu_free_ctx(iommu, ctx);
-
 	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 }
 
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -567,7 +474,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	max_seg_size = dma_get_max_seg_size(dev);
 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
 		iopte_t *base;
@@ -581,7 +488,8 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-		entry = iommu_range_alloc(dev, iommu, npages, &handle);
+		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -594,7 +502,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 		base = iommu->page_table + entry;
 
 		/* Convert entry to a dma_addr_t */
-		dma_addr = iommu->page_table_map_base +
+		dma_addr = iommu->tbl.table_map_base +
 			   (entry << IO_PAGE_SHIFT);
 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
@@ -654,15 +562,17 @@ iommu_map_failed:
 			vaddr = s->dma_address & IO_PAGE_MASK;
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
-			iommu_range_free(iommu, vaddr, npages);
 
-			entry = (vaddr - iommu->page_table_map_base)
+			entry = (vaddr - iommu->tbl.table_map_base)
 				>> IO_PAGE_SHIFT;
 			base = iommu->page_table + entry;
 
 			for (j = 0; j < npages; j++)
 				iopte_make_dummy(iommu, base + j);
 
+			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+					     DMA_ERROR_CODE);
+
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
 		}
@@ -684,10 +594,11 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
 	if (iommu->iommu_ctxflush) {
 		iopte_t *base;
 		u32 bus_addr;
+		struct iommu_map_table *tbl = &iommu->tbl;
 
 		bus_addr = sg->dma_address & IO_PAGE_MASK;
 		base = iommu->page_table +
-			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
 
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 	}
@@ -723,9 +634,8 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		if (!len)
 			break;
 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
-		iommu_range_free(iommu, dma_handle, npages);
 
-		entry = ((dma_handle - iommu->page_table_map_base)
+		entry = ((dma_handle - iommu->tbl.table_map_base)
 			 >> IO_PAGE_SHIFT);
 		base = iommu->page_table + entry;
 
@@ -737,6 +647,8 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		for (i = 0; i < npages; i++)
 			iopte_make_dummy(iommu, base + i);
 
+		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+				     DMA_ERROR_CODE);
 		sg = sg_next(sg);
 	}
 
@@ -770,9 +682,10 @@ static void dma_4u_sync_single_for_cpu(struct device *dev,
 	if (iommu->iommu_ctxflush &&
 	    strbuf->strbuf_ctxflush) {
 		iopte_t *iopte;
+		struct iommu_map_table *tbl = &iommu->tbl;
 
 		iopte = iommu->page_table +
-			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
+			((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
 		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
 	}
@@ -805,9 +718,10 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	if (iommu->iommu_ctxflush &&
 	    strbuf->strbuf_ctxflush) {
 		iopte_t *iopte;
+		struct iommu_map_table *tbl = &iommu->tbl;
 
-		iopte = iommu->page_table +
-			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+		iopte = iommu->page_table + ((sglist[0].dma_address -
+			tbl->table_map_base) >> IO_PAGE_SHIFT);
 		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
 	}
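The iommu.c conversion above drops the driver-private arena allocator in favour of the generic iommu_map_table from <linux/iommu-common.h>, which does its own per-pool locking — which is why the iommu->lock acquisitions around allocation disappear. A sketch of the allocate/free pattern the file converts to; the argument roles are inferred from the call sites in this diff, not quoted from the header:

    /* Sketch only, not a complete function. */
    entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
    			      NULL,                /* no SG handle */
    			      (unsigned long)(-1), /* no address limit */
    			      0);                  /* default alignment */
    if (unlikely(entry == DMA_ERROR_CODE))
    	return NULL;                         /* table exhausted */

    /* ... program npages IOPTEs starting at entry ... */

    iommu_tbl_range_free(&iommu->tbl, dma_addr, npages, DMA_ERROR_CODE);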
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index 1ec0de4156e7..f4be0d724fc6 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -48,12 +48,4 @@ static inline int is_span_boundary(unsigned long entry,
 	return iommu_is_span_boundary(entry, nr, shift, boundary_size);
 }
 
-unsigned long iommu_range_alloc(struct device *dev,
-				struct iommu *iommu,
-				unsigned long npages,
-				unsigned long *handle);
-void iommu_range_free(struct iommu *iommu,
-		      dma_addr_t dma_addr,
-		      unsigned long npages);
-
 #endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 274a9f59d95c..d2ae0f70059e 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -15,6 +15,7 @@
 #include <linux/list.h>
 #include <linux/init.h>
 #include <linux/bitmap.h>
+#include <linux/iommu-common.h>
 
 #include <asm/hypervisor.h>
 #include <asm/iommu.h>
@@ -27,6 +28,10 @@
 #define DRV_MODULE_VERSION	"1.1"
 #define DRV_MODULE_RELDATE	"July 22, 2008"
 
+#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
+#define COOKIE_PGSZ_CODE_SHIFT	60ULL
+
+
 static char version[] =
 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 #define LDC_PACKET_SIZE		64
@@ -98,10 +103,10 @@ static const struct ldc_mode_ops stream_ops;
 int ldom_domaining_enabled;
 
 struct ldc_iommu {
-	/* Protects arena alloc/free. */
+	/* Protects ldc_unmap. */
 	spinlock_t lock;
-	struct iommu_arena arena;
 	struct ldc_mtable_entry *page_table;
+	struct iommu_map_table iommu_map_table;
 };
 
 struct ldc_channel {
@@ -998,31 +1003,59 @@ static void free_queue(unsigned long num_entries, struct ldc_packet *q)
 	free_pages((unsigned long)q, order);
 }
 
+static unsigned long ldc_cookie_to_index(u64 cookie, void *arg)
+{
+	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
+	/* struct ldc_iommu *ldc_iommu = (struct ldc_iommu *)arg; */
+
+	cookie &= ~COOKIE_PGSZ_CODE;
+
+	return (cookie >> (13ULL + (szcode * 3ULL)));
+}
+
+static void ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie,
+		      unsigned long entry, unsigned long npages)
+{
+	struct ldc_mtable_entry *base;
+	unsigned long i, shift;
+
+	shift = (cookie >> COOKIE_PGSZ_CODE_SHIFT) * 3;
+	base = iommu->page_table + entry;
+	for (i = 0; i < npages; i++) {
+		if (base->cookie)
+			sun4v_ldc_revoke(id, cookie + (i << shift),
+					 base->cookie);
+		base->mte = 0;
+	}
+}
+
 /* XXX Make this configurable... XXX */
 #define LDC_IOTABLE_SIZE	(8 * 1024)
 
-static int ldc_iommu_init(struct ldc_channel *lp)
+static int ldc_iommu_init(const char *name, struct ldc_channel *lp)
 {
 	unsigned long sz, num_tsb_entries, tsbsize, order;
-	struct ldc_iommu *iommu = &lp->iommu;
+	struct ldc_iommu *ldc_iommu  = &lp->iommu;
+	struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
 	struct ldc_mtable_entry *table;
 	unsigned long hv_err;
 	int err;
 
 	num_tsb_entries = LDC_IOTABLE_SIZE;
 	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
-
-	spin_lock_init(&iommu->lock);
+	spin_lock_init(&ldc_iommu->lock);
 
 	sz = num_tsb_entries / 8;
 	sz = (sz + 7UL) & ~7UL;
-	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
-	if (!iommu->arena.map) {
+	iommu->map = kzalloc(sz, GFP_KERNEL);
+	if (!iommu->map) {
 		printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
 		return -ENOMEM;
 	}
-
-	iommu->arena.limit = num_tsb_entries;
+	iommu_tbl_pool_init(iommu, num_tsb_entries, PAGE_SHIFT,
+			    NULL, false /* no large pool */,
+			    1 /* npools */,
+			    true /* skip span boundary check */);
 
 	order = get_order(tsbsize);
@@ -1037,7 +1070,7 @@ static int ldc_iommu_init(struct ldc_channel *lp)
 
 	memset(table, 0, PAGE_SIZE << order);
 
-	iommu->page_table = table;
+	ldc_iommu->page_table = table;
 
 	hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
 					 num_tsb_entries);
@@ -1049,31 +1082,32 @@ static int ldc_iommu_init(struct ldc_channel *lp)
 
 out_free_table:
 	free_pages((unsigned long) table, order);
-	iommu->page_table = NULL;
+	ldc_iommu->page_table = NULL;
 
 out_free_map:
-	kfree(iommu->arena.map);
-	iommu->arena.map = NULL;
+	kfree(iommu->map);
+	iommu->map = NULL;
 
 	return err;
 }
 
 static void ldc_iommu_release(struct ldc_channel *lp)
 {
-	struct ldc_iommu *iommu = &lp->iommu;
+	struct ldc_iommu *ldc_iommu = &lp->iommu;
+	struct iommu_map_table *iommu = &ldc_iommu->iommu_map_table;
 	unsigned long num_tsb_entries, tsbsize, order;
 
 	(void) sun4v_ldc_set_map_table(lp->id, 0, 0);
 
-	num_tsb_entries = iommu->arena.limit;
+	num_tsb_entries = iommu->poolsize * iommu->nr_pools;
 	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
 	order = get_order(tsbsize);
 
-	free_pages((unsigned long) iommu->page_table, order);
-	iommu->page_table = NULL;
+	free_pages((unsigned long) ldc_iommu->page_table, order);
+	ldc_iommu->page_table = NULL;
 
-	kfree(iommu->arena.map);
-	iommu->arena.map = NULL;
+	kfree(iommu->map);
+	iommu->map = NULL;
 }
 
 struct ldc_channel *ldc_alloc(unsigned long id,
@@ -1140,7 +1174,7 @@ struct ldc_channel *ldc_alloc(unsigned long id,
 
 	lp->id = id;
 
-	err = ldc_iommu_init(lp);
+	err = ldc_iommu_init(name, lp);
 	if (err)
 		goto out_free_ldc;
 
@@ -1885,40 +1919,6 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
 }
 EXPORT_SYMBOL(ldc_read);
 
-static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
-{
-	struct iommu_arena *arena = &iommu->arena;
-	unsigned long n, start, end, limit;
-	int pass;
-
-	limit = arena->limit;
-	start = arena->hint;
-	pass = 0;
-
-again:
-	n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
-	end = n + npages;
-	if (unlikely(end >= limit)) {
-		if (likely(pass < 1)) {
-			limit = start;
-			start = 0;
-			pass++;
-			goto again;
-		} else {
-			/* Scanned the whole thing, give up. */
-			return -1;
-		}
-	}
-	bitmap_set(arena->map, n, npages);
-
-	arena->hint = end;
-
-	return n;
-}
-
-#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
-#define COOKIE_PGSZ_CODE_SHIFT	60ULL
-
 static u64 pagesize_code(void)
 {
 	switch (PAGE_SIZE) {
@@ -1945,23 +1945,14 @@ static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
 			 page_offset);
 }
 
-static u64 cookie_to_index(u64 cookie, unsigned long *shift)
-{
-	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;
-
-	cookie &= ~COOKIE_PGSZ_CODE;
-
-	*shift = szcode * 3;
-
-	return (cookie >> (13ULL + (szcode * 3ULL)));
-}
-
 static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
 					     unsigned long npages)
 {
 	long entry;
 
-	entry = arena_alloc(iommu, npages);
+	entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
+				      npages, NULL, (unsigned long)-1, 0);
 	if (unlikely(entry < 0))
 		return NULL;
 
@@ -2090,7 +2081,7 @@ int ldc_map_sg(struct ldc_channel *lp,
 	       struct ldc_trans_cookie *cookies, int ncookies,
 	       unsigned int map_perm)
 {
-	unsigned long i, npages, flags;
+	unsigned long i, npages;
 	struct ldc_mtable_entry *base;
 	struct cookie_state state;
 	struct ldc_iommu *iommu;
@@ -2109,9 +2100,7 @@ int ldc_map_sg(struct ldc_channel *lp,
 
 	iommu = &lp->iommu;
 
-	spin_lock_irqsave(&iommu->lock, flags);
 	base = alloc_npages(iommu, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (!base)
 		return -ENOMEM;
@@ -2136,7 +2125,7 @@ int ldc_map_single(struct ldc_channel *lp,
 		   struct ldc_trans_cookie *cookies, int ncookies,
 		   unsigned int map_perm)
 {
-	unsigned long npages, pa, flags;
+	unsigned long npages, pa;
 	struct ldc_mtable_entry *base;
 	struct cookie_state state;
 	struct ldc_iommu *iommu;
@@ -2152,9 +2141,7 @@ int ldc_map_single(struct ldc_channel *lp,
 
 	iommu = &lp->iommu;
 
-	spin_lock_irqsave(&iommu->lock, flags);
 	base = alloc_npages(iommu, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	if (!base)
 		return -ENOMEM;
@@ -2172,35 +2159,25 @@ int ldc_map_single(struct ldc_channel *lp,
 }
 EXPORT_SYMBOL(ldc_map_single);
 
+
 static void free_npages(unsigned long id, struct ldc_iommu *iommu,
 			u64 cookie, u64 size)
 {
-	struct iommu_arena *arena = &iommu->arena;
-	unsigned long i, shift, index, npages;
-	struct ldc_mtable_entry *base;
+	unsigned long npages, entry;
 
 	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
 
-	index = cookie_to_index(cookie, &shift);
-	base = iommu->page_table + index;
-
-	BUG_ON(index > arena->limit ||
-	       (index + npages) > arena->limit);
-
-	for (i = 0; i < npages; i++) {
-		if (base->cookie)
-			sun4v_ldc_revoke(id, cookie + (i << shift),
-					 base->cookie);
-		base->mte = 0;
-		__clear_bit(index + i, arena->map);
-	}
+	entry = ldc_cookie_to_index(cookie, iommu);
+	ldc_demap(iommu, id, cookie, entry, npages);
+	iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
 }
 
 void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
 	       int ncookies)
 {
 	struct ldc_iommu *iommu = &lp->iommu;
-	unsigned long flags;
 	int i;
+	unsigned long flags;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	for (i = 0; i < ncookies; i++) {
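ldc_cookie_to_index() above recovers a map-table index from an LDC cookie: the top four bits carry a page-size code, and the bits below hold the index scaled by the page size, so the index comes back out with a right shift of 13 + 3 * szcode (8K pages for code 0, growing by a factor of 8 per step). A userspace re-statement of that arithmetic, runnable as-is:

    #include <stdint.h>
    #include <stdio.h>

    #define COOKIE_PGSZ_CODE	0xf000000000000000ULL
    #define COOKIE_PGSZ_CODE_SHIFT	60ULL

    /* Mirrors the ldc_cookie_to_index() arithmetic from the diff. */
    static uint64_t cookie_to_index(uint64_t cookie)
    {
    	uint64_t szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;

    	cookie &= ~COOKIE_PGSZ_CODE;
    	return cookie >> (13ULL + szcode * 3ULL);
    }

    int main(void)
    {
    	/* szcode 0 = 8K pages: entry 5 at page offset 0 -> index 5 */
    	printf("%llu\n",
    	       (unsigned long long)cookie_to_index(5ULL << 13));
    	return 0;
    }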
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index 899b7203a4e4..4371f72ff025 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -34,15 +34,17 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
 	root_bus = pci_scan_root_bus(&ofdev->dev, 0, info->ops, info,
 				     &resources);
 
-	if (root_bus) {
-		/* Setup IRQs of all devices using custom routines */
-		pci_fixup_irqs(pci_common_swizzle, info->map_irq);
-
-		/* Assign devices with resources */
-		pci_assign_unassigned_resources();
-	} else {
+	if (!root_bus) {
 		pci_free_resource_list(&resources);
+		return;
 	}
+
+	/* Setup IRQs of all devices using custom routines */
+	pci_fixup_irqs(pci_common_swizzle, info->map_irq);
+
+	/* Assign devices with resources */
+	pci_assign_unassigned_resources();
+	pci_bus_add_devices(root_bus);
 }
 
 void pcibios_fixup_bus(struct pci_bus *pbus)
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 99632a87e697..26c80e18d7b1 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -130,26 +130,26 @@ static struct mdesc_mem_ops memblock_mdesc_ops = {
 static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
 {
 	unsigned int handle_size;
+	struct mdesc_handle *hp;
+	unsigned long addr;
 	void *base;
 
 	handle_size = (sizeof(struct mdesc_handle) -
 		       sizeof(struct mdesc_hdr) +
 		       mdesc_size);
+	/*
+	 * Allocation has to succeed because mdesc update would be missed
+	 * and such events are not retransmitted.
+	 */
 	base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
-	if (base) {
-		struct mdesc_handle *hp;
-		unsigned long addr;
-
-		addr = (unsigned long)base;
-		addr = (addr + 15UL) & ~15UL;
-		hp = (struct mdesc_handle *) addr;
+	addr = (unsigned long)base;
+	addr = (addr + 15UL) & ~15UL;
+	hp = (struct mdesc_handle *) addr;
 
-		mdesc_handle_init(hp, handle_size, base);
-		return hp;
-	}
+	mdesc_handle_init(hp, handle_size, base);
 
-	return NULL;
+	return hp;
 }
 
 static void mdesc_kfree(struct mdesc_handle *hp)
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 9ce5afe167ff..6f7251fd2eab 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -639,10 +639,7 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
 				       (unsigned long long)r->end,
 				       (unsigned int)r->flags);
 
-			if (pci_claim_resource(dev, i) == 0)
-				continue;
-
-			pci_claim_bridge_resource(dev, i);
+			pci_claim_resource(dev, i);
 		}
 	}
 
@@ -677,11 +674,10 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
 	}
 
 	pci_of_scan_bus(pbm, node, bus);
-	pci_bus_add_devices(bus);
 	pci_bus_register_of_sysfs(bus);
 
 	pci_claim_bus_resources(bus);
-
+	pci_bus_add_devices(bus);
 	return bus;
 }
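mdesc_kmalloc() above guarantees 16-byte alignment by over-allocating 15 bytes and rounding the address up with (addr + 15UL) & ~15UL; the unaligned base pointer is stashed via mdesc_handle_init() so it can be handed back to kfree() later. The same trick in plain C, with a hypothetical helper name:

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical helper, not from the kernel: returns a pointer
     * aligned to 16 bytes; *raw_out keeps the pointer for free(). */
    static void *alloc_aligned16(size_t size, void **raw_out)
    {
    	void *base = malloc(size + 15);
    	uintptr_t addr = (uintptr_t)base;

    	if (!base)
    		return NULL;
    	addr = (addr + 15UL) & ~15UL;	/* round up to 16-byte boundary */
    	*raw_out = base;
    	return (void *)addr;
    }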
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 47ddbd496a1e..d2fe57dad433 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,6 +15,7 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
+#include <linux/iommu-common.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -155,15 +156,13 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 
 	iommu = dev->archdata.iommu;
 
-	spin_lock_irqsave(&iommu->lock, flags);
-	entry = iommu_range_alloc(dev, iommu, npages, NULL);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == DMA_ERROR_CODE))
 		goto range_alloc_fail;
 
-	*dma_addrp = (iommu->page_table_map_base +
-		      (entry << IO_PAGE_SHIFT));
+	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	first_page = __pa(first_page);
 
@@ -188,45 +187,46 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 
 iommu_map_fail:
-	/* Interrupts are disabled. */
-	spin_lock(&iommu->lock);
-	iommu_range_free(iommu, *dma_addrp, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
 
 range_alloc_fail:
 	free_pages(first_page, order);
 	return NULL;
 }
 
+static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
+			       unsigned long npages)
+{
+	u32 devhandle = *(u32 *)demap_arg;
+	unsigned long num, flags;
+
+	local_irq_save(flags);
+	do {
+		num = pci_sun4v_iommu_demap(devhandle,
+					    HV_PCI_TSBID(0, entry),
+					    npages);
+
+		entry += num;
+		npages -= num;
+	} while (npages != 0);
+	local_irq_restore(flags);
+}
+
 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 				 dma_addr_t dvma, struct dma_attrs *attrs)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
-	unsigned long flags, order, npages, entry;
+	unsigned long order, npages, entry;
 	u32 devhandle;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
 	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
-	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-
-	iommu_range_free(iommu, dvma, npages);
-
-	do {
-		unsigned long num;
-
-		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-					    npages);
-		entry += num;
-		npages -= num;
-	} while (npages != 0);
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
+	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
+	dma_4v_iommu_demap(&devhandle, entry, npages);
+	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
 	order = get_order(size);
 	if (order < 10)
 		free_pages((unsigned long)cpu, order);
@@ -253,15 +253,13 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
-	spin_lock_irqsave(&iommu->lock, flags);
-	entry = iommu_range_alloc(dev, iommu, npages, NULL);
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+				      (unsigned long)(-1), 0);
 
 	if (unlikely(entry == DMA_ERROR_CODE))
 		goto bad;
 
-	bus_addr = (iommu->page_table_map_base +
-		    (entry << IO_PAGE_SHIFT));
+	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -290,11 +288,7 @@ bad:
 	return DMA_ERROR_CODE;
 
 iommu_map_fail:
-	/* Interrupts are disabled. */
-	spin_lock(&iommu->lock);
-	iommu_range_free(iommu, bus_addr, npages);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 	return DMA_ERROR_CODE;
 }
 
@@ -304,7 +298,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
-	unsigned long flags, npages;
+	unsigned long npages;
 	long entry;
 	u32 devhandle;
 
@@ -321,22 +315,9 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 	bus_addr &= IO_PAGE_MASK;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-
-	iommu_range_free(iommu, bus_addr, npages);
-
-	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-	do {
-		unsigned long num;
-
-		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-					    npages);
-		entry += num;
-		npages -= num;
-	} while (npages != 0);
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
+	dma_4v_iommu_demap(&devhandle, entry, npages);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -371,14 +352,14 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	/* Init first segment length for backout at failure */
 	outs->dma_length = 0;
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	local_irq_save(flags);
 
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
 
 	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -391,7 +372,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
 		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-		entry = iommu_range_alloc(dev, iommu, npages, &handle);
+		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -404,8 +386,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		iommu_batch_new_entry(entry);
 
 		/* Convert entry to a dma_addr_t */
-		dma_addr = iommu->page_table_map_base +
-			   (entry << IO_PAGE_SHIFT);
+		dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
 		dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
 		/* Insert into HW table */
@@ -451,7 +432,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	if (unlikely(err < 0L))
 		goto iommu_map_failed;
 
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	local_irq_restore(flags);
 
 	if (outcount < incount) {
 		outs = sg_next(outs);
@@ -469,7 +450,8 @@ iommu_map_failed:
 			vaddr = s->dma_address & IO_PAGE_MASK;
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
-			iommu_range_free(iommu, vaddr, npages);
+			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+					     DMA_ERROR_CODE);
 			/* XXX demap? XXX */
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -477,7 +459,7 @@ iommu_map_failed:
 		if (s == outs)
 			break;
 	}
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	local_irq_restore(flags);
 
 	return 0;
 }
@@ -489,7 +471,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct pci_pbm_info *pbm;
 	struct scatterlist *sg;
 	struct iommu *iommu;
-	unsigned long flags;
+	unsigned long flags, entry;
 	u32 devhandle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -498,33 +480,27 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	pbm = dev->archdata.host_controller;
 	devhandle = pbm->devhandle;
 
-	spin_lock_irqsave(&iommu->lock, flags);
+	local_irq_save(flags);
 
 	sg = sglist;
 	while (nelems--) {
 		dma_addr_t dma_handle = sg->dma_address;
 		unsigned int len = sg->dma_length;
-		unsigned long npages, entry;
+		unsigned long npages;
+		struct iommu_map_table *tbl = &iommu->tbl;
+		unsigned long shift = IO_PAGE_SHIFT;
 
 		if (!len)
 			break;
 		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
-		iommu_range_free(iommu, dma_handle, npages);
-
-		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-		while (npages) {
-			unsigned long num;
-
-			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-						    npages);
-			entry += num;
-			npages -= num;
-		}
-
+		entry = ((dma_handle - tbl->table_map_base) >> shift);
+		dma_4v_iommu_demap(&devhandle, entry, npages);
+		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+				     DMA_ERROR_CODE);
 		sg = sg_next(sg);
 	}
 
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	local_irq_restore(flags);
 }
 
 static struct dma_map_ops sun4v_dma_ops = {
@@ -550,30 +526,33 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 }
 
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-					    struct iommu *iommu)
+					    struct iommu_map_table *iommu)
 {
-	struct iommu_arena *arena = &iommu->arena;
-	unsigned long i, cnt = 0;
+	struct iommu_pool *pool;
+	unsigned long i, pool_nr, cnt = 0;
 	u32 devhandle;
 
 	devhandle = pbm->devhandle;
-	for (i = 0; i < arena->limit; i++) {
-		unsigned long ret, io_attrs, ra;
-
-		ret = pci_sun4v_iommu_getmap(devhandle,
-					     HV_PCI_TSBID(0, i),
-					     &io_attrs, &ra);
-		if (ret == HV_EOK) {
-			if (page_in_phys_avail(ra)) {
-				pci_sun4v_iommu_demap(devhandle,
-						      HV_PCI_TSBID(0, i), 1);
-			} else {
-				cnt++;
-				__set_bit(i, arena->map);
+	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
+		pool = &(iommu->pools[pool_nr]);
+		for (i = pool->start; i <= pool->end; i++) {
+			unsigned long ret, io_attrs, ra;
+
+			ret = pci_sun4v_iommu_getmap(devhandle,
+						     HV_PCI_TSBID(0, i),
+						     &io_attrs, &ra);
+			if (ret == HV_EOK) {
+				if (page_in_phys_avail(ra)) {
+					pci_sun4v_iommu_demap(devhandle,
+							      HV_PCI_TSBID(0,
							      i), 1);
+				} else {
+					cnt++;
+					__set_bit(i, iommu->map);
+				}
 			}
 		}
 	}
-
 	return cnt;
 }
@@ -603,20 +582,22 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
 	iommu->ctx_lowest_free = 1;
-	iommu->page_table_map_base = dma_offset;
+	iommu->tbl.table_map_base = dma_offset;
 	iommu->dma_addr_mask = dma_mask;
 
 	/* Allocate and initialize the free area map. */
 	sz = (num_tsb_entries + 7) / 8;
 	sz = (sz + 7UL) & ~7UL;
-	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
-	if (!iommu->arena.map) {
+	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
+	if (!iommu->tbl.map) {
 		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
 		return -ENOMEM;
 	}
-	iommu->arena.limit = num_tsb_entries;
-
-	sz = probe_existing_entries(pbm, iommu);
+	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+			    NULL, false /* no large_pool */,
+			    0 /* default npools */,
+			    false /* want span boundary checking */);
+	sz = probe_existing_entries(pbm, &iommu->tbl);
 	if (sz)
 		printk("%s: Imported %lu TSB entries from OBP\n",
 		       pbm->name, sz);
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 6cc78c213c01..24384e1dc33d 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -391,12 +391,16 @@ static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic)
 	struct linux_pbm_info *pbm = &pcic->pbm;
 
 	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, &pcic_ops, pbm);
+	if (!pbm->pci_bus)
+		return;
+
 #if 0 /* deadwood transplanted from sparc64 */
 	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
 	pci_record_assignments(pbm, pbm->pci_bus);
 	pci_assign_unassigned(pbm, pbm->pci_bus);
 	pci_fixup_irq(pbm, pbm->pci_bus);
 #endif
+	pci_bus_add_devices(pbm->pci_bus);
 }
 
 /*
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 2f80d23a0a44..18147a5523d9 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -181,17 +181,13 @@ static struct clocksource timer_cs = {
 	.rating	= 100,
 	.read	= timer_cs_read,
 	.mask	= CLOCKSOURCE_MASK(64),
-	.shift	= 2,
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static __init int setup_timer_cs(void)
 {
 	timer_cs_enabled = 1;
-	timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
-					    timer_cs.shift);
-
-	return clocksource_register(&timer_cs);
+	return clocksource_register_hz(&timer_cs, sparc_config.clock_rate);
 }
 
 #ifdef CONFIG_SMP
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 6fd386c5232a..4f21df7d4f13 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -433,7 +433,6 @@ void trap_init(void)
 	/* Force linker to barf if mismatched */
 	if (TI_UWINMASK    != offsetof(struct thread_info, uwinmask) ||
 	    TI_TASK        != offsetof(struct thread_info, task) ||
-	    TI_EXECDOMAIN  != offsetof(struct thread_info, exec_domain) ||
 	    TI_FLAGS       != offsetof(struct thread_info, flags) ||
 	    TI_CPU         != offsetof(struct thread_info, cpu) ||
 	    TI_PREEMPT     != offsetof(struct thread_info, preempt_count) ||
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 0e699745d643..d21cd625c0de 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2691,8 +2691,6 @@ void __init trap_init(void)
 		     fault_address) ||
 	    TI_KREGS != offsetof(struct thread_info, kregs) ||
 	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
-	    TI_EXEC_DOMAIN != offsetof(struct thread_info,
-				       exec_domain) ||
 	    TI_REG_WINDOW != offsetof(struct thread_info,
 				      reg_window) ||
 	    TI_RWIN_SPTRS != offsetof(struct thread_info,
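The time_32.c hunk stops hard-coding .shift = 2 and computing .mult by hand; clocksource_register_hz() lets the core pick both from the clock rate. The underlying conversion is ns = (cycles * mult) >> shift, with mult roughly (NSEC_PER_SEC << shift) / hz. A rough standalone model with example numbers (the real core chooses its own shift):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	const uint64_t NSEC_PER_SEC = 1000000000ULL;
    	uint64_t hz = 4000000;	/* hypothetical 4 MHz timer */
    	uint32_t shift = 2;	/* the old hard-coded value */
    	uint64_t mult = (NSEC_PER_SEC << shift) / hz;
    	uint64_t cycles = 400;	/* 100 us worth of ticks at 4 MHz */

    	/* prints "100000 ns" */
    	printf("%llu ns\n",
    	       (unsigned long long)((cycles * mult) >> shift));
    	return 0;
    }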