Diffstat (limited to 'arch/sparc')
60 files changed, 2194 insertions, 833 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 3f8b6a92eabd..86b82348b97c 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -25,6 +25,9 @@ config SPARC select ARCH_WANT_OPTIONAL_GPIOLIB select RTC_CLASS select RTC_DRV_M48T59 + select HAVE_PERF_COUNTERS + select HAVE_DMA_ATTRS + select HAVE_DMA_API_DEBUG config SPARC32 def_bool !64BIT @@ -44,6 +47,7 @@ config SPARC64 select RTC_DRV_BQ4802 select RTC_DRV_SUN4V select RTC_DRV_STARFIRE + select HAVE_PERF_COUNTERS config ARCH_DEFCONFIG string @@ -95,7 +99,7 @@ config AUDIT_ARCH config HAVE_SETUP_PER_CPU_AREA def_bool y if SPARC64 -config HAVE_DYNAMIC_PER_CPU_AREA +config NEED_PER_CPU_EMBED_FIRST_CHUNK def_bool y if SPARC64 config GENERIC_HARDIRQS_NO__DO_IRQ @@ -437,6 +441,17 @@ config SERIAL_CONSOLE If unsure, say N. +config SPARC_LEON + bool "Sparc Leon processor family" + depends on SPARC32 + ---help--- + Say Y here if you are running on a SPARC-LEON processor. + The LEON processor is a synthesizable VHDL model of the + SPARC-v8 standard. LEON is part of the GRLIB collection of + IP cores that are distributed under GPL. GRLIB can be downloaded + from www.gaisler.com. You can download a sparc-linux cross-compilation + toolchain at www.gaisler.com. + endmenu menu "Bus options (PCI etc.)" diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index 2003ded054c2..467221dd5702 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -38,10 +38,6 @@ CPPFLAGS_vmlinux.lds += -m32 # Actual linking is done with "make image". LDFLAGS_vmlinux = -r -# Default target -all: zImage - - else ##### # sparc64 @@ -91,6 +87,9 @@ endif boot := arch/sparc/boot +# Default target +all: zImage + image zImage tftpboot.img vmlinux.aout: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ @@ -109,8 +108,9 @@ define archhelp endef else define archhelp - echo '* vmlinux - Standard sparc64 kernel' - echo ' vmlinux.aout - a.out kernel for sparc64' + echo '* vmlinux - standard sparc64 kernel' + echo '* zImage - stripped and compressed sparc64 kernel ($(boot)/zImage)' + echo ' vmlinux.aout - a.out kernel for sparc64' echo ' tftpboot.img - image prepared for tftp' endef endif diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile index 1ff0fd924756..97e3feb9ff1b 100644 --- a/arch/sparc/boot/Makefile +++ b/arch/sparc/boot/Makefile @@ -79,6 +79,9 @@ $(obj)/image: vmlinux FORCE $(call if_changed,strip) @echo ' kernel: $@ is ready' +$(obj)/zImage: $(obj)/image + $(call if_changed,gzip) + $(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE $(call if_changed,elftoaout) $(call if_changed,piggy) diff --git a/arch/sparc/include/asm/agp.h b/arch/sparc/include/asm/agp.h index c2456870b05c..70f52c1661bc 100644 --- a/arch/sparc/include/asm/agp.h +++ b/arch/sparc/include/asm/agp.h @@ -7,10 +7,6 @@ #define unmap_page_from_agp(page) #define flush_agp_cache() mb() -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - /* GATT allocation. Returns/accepts GATT kernel virtual address. */ #define alloc_gatt_pages(order) \ ((char *)__get_free_pages(GFP_KERNEL, (order))) diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/asm/asi.h index 74703c5ef985..b2e3db63a64b 100644 --- a/arch/sparc/include/asm/asi.h +++ b/arch/sparc/include/asm/asi.h @@ -40,7 +40,11 @@ #define ASI_M_UNA01 0x01 /* Same here...
*/ #define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */ #define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */ +#ifndef CONFIG_SPARC_LEON #define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */ +#else +#define ASI_M_MMUREGS 0x19 +#endif /* CONFIG_SPARC_LEON */ #define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */ #define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */ #define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */ diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h index 3702e087df2c..f3b85b6b0b76 100644 --- a/arch/sparc/include/asm/device.h +++ b/arch/sparc/include/asm/device.h @@ -32,4 +32,7 @@ dev_archdata_get_node(const struct dev_archdata *ad) return ad->prom_node; } +struct pdev_archdata { +}; + #endif /* _ASM_SPARC_DEVICE_H */ diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 204e4bf64438..5a8c308e2b5c 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -3,6 +3,7 @@ #include <linux/scatterlist.h> #include <linux/mm.h> +#include <linux/dma-debug.h> #define DMA_ERROR_CODE (~(dma_addr_t)0x0) @@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask); #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_is_consistent(d, h) (1) -struct dma_ops { - void *(*alloc_coherent)(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - void (*free_coherent)(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle); - dma_addr_t (*map_page)(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction); - void (*unmap_page)(struct device *dev, dma_addr_t dma_addr, - size_t size, - enum dma_data_direction direction); - int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction); - void (*unmap_sg)(struct device *dev, struct scatterlist *sg, - int nhwentries, - enum dma_data_direction direction); - void (*sync_single_for_cpu)(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction); - void (*sync_single_for_device)(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction); - void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, - int nelems, - enum dma_data_direction direction); - void (*sync_sg_for_device)(struct device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction dir); -}; -extern const struct dma_ops *dma_ops; +extern struct dma_map_ops *dma_ops, pci32_dma_ops; +extern struct bus_type pci_bus_type; -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - return dma_ops->alloc_coherent(dev, size, dma_handle, flag); -} - -static inline void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) -{ - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); -} - -static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, - size_t size, - enum dma_data_direction direction) -{ - return dma_ops->map_page(dev, virt_to_page(cpu_addr), - (unsigned long)cpu_addr & ~PAGE_MASK, size, - direction); -} - -static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, - size_t size, - enum dma_data_direction direction) -{ - dma_ops->unmap_page(dev, dma_addr, size, direction); -} - -static inline dma_addr_t dma_map_page(struct device *dev, 
struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - return dma_ops->map_page(dev, page, offset, size, direction); -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, - enum dma_data_direction direction) -{ - dma_ops->unmap_page(dev, dma_address, size, direction); -} - -static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) -{ - return dma_ops->map_sg(dev, sg, nents, direction); -} - -static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) +static inline struct dma_map_ops *get_dma_ops(struct device *dev) { - dma_ops->unmap_sg(dev, sg, nents, direction); -} - -static inline void dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction); +#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) + if (dev->bus == &pci_bus_type) + return &pci32_dma_ops; +#endif + return dma_ops; } -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction) -{ - if (dma_ops->sync_single_for_device) - dma_ops->sync_single_for_device(dev, dma_handle, size, - direction); -} +#include <asm-generic/dma-mapping-common.h> -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction) +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) { - dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); -} + struct dma_map_ops *ops = get_dma_ops(dev); + void *cpu_addr; -static inline void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - if (dma_ops->sync_sg_for_device) - dma_ops->sync_sg_for_device(dev, sg, nelems, direction); + cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag); + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); + return cpu_addr; } -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction dir) +static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) { - dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir); -} + struct dma_map_ops *ops = get_dma_ops(dev); -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction dir) -{ - dma_sync_single_for_device(dev, dma_handle+offset, size, dir); + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free_coherent(dev, size, cpu_addr, dma_handle); } - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return (dma_addr == DMA_ERROR_CODE); diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h index 1934f2cbf513..a0b443cb3c1f 100644 --- a/arch/sparc/include/asm/irq_64.h +++ b/arch/sparc/include/asm/irq_64.h @@ -89,8 +89,8 @@ static inline unsigned long get_softint(void) return retval; } -void __trigger_all_cpu_backtrace(void); -#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() +void arch_trigger_all_cpu_backtrace(void); +#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 
extern void *hardirq_stack[NR_CPUS]; extern void *softirq_stack[NR_CPUS]; diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h new file mode 100644 index 000000000000..28a42b73f64f --- /dev/null +++ b/arch/sparc/include/asm/leon.h @@ -0,0 +1,362 @@ +/* + * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com) Gaisler Research + * Copyright (C) 2004 Stefan Holst (mail@s-holst.de) Uni-Stuttgart + * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB + * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB + */ + +#ifndef LEON_H_INCLUDE +#define LEON_H_INCLUDE + +#ifdef CONFIG_SPARC_LEON + +#define ASI_LEON_NOCACHE 0x01 + +#define ASI_LEON_DCACHE_MISS 0x1 + +#define ASI_LEON_CACHEREGS 0x02 +#define ASI_LEON_IFLUSH 0x10 +#define ASI_LEON_DFLUSH 0x11 + +#define ASI_LEON_MMUFLUSH 0x18 +#define ASI_LEON_MMUREGS 0x19 +#define ASI_LEON_BYPASS 0x1c +#define ASI_LEON_FLUSH_PAGE 0x10 + +/* mmu register access, ASI_LEON_MMUREGS */ +#define LEON_CNR_CTRL 0x000 +#define LEON_CNR_CTXP 0x100 +#define LEON_CNR_CTX 0x200 +#define LEON_CNR_F 0x300 +#define LEON_CNR_FADDR 0x400 + +#define LEON_CNR_CTX_NCTX 256 /*number of MMU ctx */ + +#define LEON_CNR_CTRL_TLBDIS 0x80000000 + +#define LEON_MMUTLB_ENT_MAX 64 + +/* + * diagnostic access from mmutlb.vhd: + * 0: pte address + * 4: pte + * 8: additional flags + */ +#define LEON_DIAGF_LVL 0x3 +#define LEON_DIAGF_WR 0x8 +#define LEON_DIAGF_WR_SHIFT 3 +#define LEON_DIAGF_HIT 0x10 +#define LEON_DIAGF_HIT_SHIFT 4 +#define LEON_DIAGF_CTX 0x1fe0 +#define LEON_DIAGF_CTX_SHIFT 5 +#define LEON_DIAGF_VALID 0x2000 +#define LEON_DIAGF_VALID_SHIFT 13 + +/* + * Interrupt Sources + * + * The interrupt source numbers directly map to the trap type and to + * the bits used in the Interrupt Clear, Interrupt Force, Interrupt Mask, + * and the Interrupt Pending Registers. 
+ */ +#define LEON_INTERRUPT_CORRECTABLE_MEMORY_ERROR 1 +#define LEON_INTERRUPT_UART_1_RX_TX 2 +#define LEON_INTERRUPT_UART_0_RX_TX 3 +#define LEON_INTERRUPT_EXTERNAL_0 4 +#define LEON_INTERRUPT_EXTERNAL_1 5 +#define LEON_INTERRUPT_EXTERNAL_2 6 +#define LEON_INTERRUPT_EXTERNAL_3 7 +#define LEON_INTERRUPT_TIMER1 8 +#define LEON_INTERRUPT_TIMER2 9 +#define LEON_INTERRUPT_EMPTY1 10 +#define LEON_INTERRUPT_EMPTY2 11 +#define LEON_INTERRUPT_OPEN_ETH 12 +#define LEON_INTERRUPT_EMPTY4 13 +#define LEON_INTERRUPT_EMPTY5 14 +#define LEON_INTERRUPT_EMPTY6 15 + +/* irq masks */ +#define LEON_HARD_INT(x) (1 << (x)) /* irq 0-15 */ +#define LEON_IRQMASK_R 0x0000fffe /* bit 15- 1 of lregs.irqmask */ +#define LEON_IRQPRIO_R 0xfffe0000 /* bit 31-17 of lregs.irqmask */ + +/* leon uart register definitions */ +#define LEON_OFF_UDATA 0x0 +#define LEON_OFF_USTAT 0x4 +#define LEON_OFF_UCTRL 0x8 +#define LEON_OFF_USCAL 0xc + +#define LEON_UCTRL_RE 0x01 +#define LEON_UCTRL_TE 0x02 +#define LEON_UCTRL_RI 0x04 +#define LEON_UCTRL_TI 0x08 +#define LEON_UCTRL_PS 0x10 +#define LEON_UCTRL_PE 0x20 +#define LEON_UCTRL_FL 0x40 +#define LEON_UCTRL_LB 0x80 + +#define LEON_USTAT_DR 0x01 +#define LEON_USTAT_TS 0x02 +#define LEON_USTAT_TH 0x04 +#define LEON_USTAT_BR 0x08 +#define LEON_USTAT_OV 0x10 +#define LEON_USTAT_PE 0x20 +#define LEON_USTAT_FE 0x40 + +#define LEON_MCFG2_SRAMDIS 0x00002000 +#define LEON_MCFG2_SDRAMEN 0x00004000 +#define LEON_MCFG2_SRAMBANKSZ 0x00001e00 /* [12-9] */ +#define LEON_MCFG2_SRAMBANKSZ_SHIFT 9 +#define LEON_MCFG2_SDRAMBANKSZ 0x03800000 /* [25-23] */ +#define LEON_MCFG2_SDRAMBANKSZ_SHIFT 23 + +#define LEON_TCNT0_MASK 0x7fffff + +#define LEON_USTAT_ERROR (LEON_USTAT_OV | LEON_USTAT_PE | LEON_USTAT_FE) +/* no break yet */ + +#define ASI_LEON3_SYSCTRL 0x02 +#define ASI_LEON3_SYSCTRL_ICFG 0x08 +#define ASI_LEON3_SYSCTRL_DCFG 0x0c +#define ASI_LEON3_SYSCTRL_CFG_SNOOPING (1 << 27) +#define ASI_LEON3_SYSCTRL_CFG_SSIZE(c) (1 << ((c >> 20) & 0xf)) + +#ifndef __ASSEMBLY__ + +/* do a virtual address read without cache */ +static inline unsigned long leon_readnobuffer_reg(unsigned long paddr) +{ + unsigned long retval; + __asm__ __volatile__("lda [%1] %2, %0\n\t" : + "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE)); + return retval; +} + +/* do a physical address bypass write, i.e. for 0x80000000 */ +static inline void leon_store_reg(unsigned long paddr, unsigned long value) +{ + __asm__ __volatile__("sta %0, [%1] %2\n\t" : : "r"(value), "r"(paddr), + "i"(ASI_LEON_BYPASS) : "memory"); +} + +/* do a physical address bypass load, i.e. 
for 0x80000000 */ +static inline unsigned long leon_load_reg(unsigned long paddr) +{ + unsigned long retval; + __asm__ __volatile__("lda [%1] %2, %0\n\t" : + "=r"(retval) : "r"(paddr), "i"(ASI_LEON_BYPASS)); + return retval; +} + +extern inline void leon_srmmu_disabletlb(void) +{ + unsigned int retval; + __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), + "i"(ASI_LEON_MMUREGS)); + retval |= LEON_CNR_CTRL_TLBDIS; + __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0), + "i"(ASI_LEON_MMUREGS) : "memory"); +} + +extern inline void leon_srmmu_enabletlb(void) +{ + unsigned int retval; + __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), + "i"(ASI_LEON_MMUREGS)); + retval = retval & ~LEON_CNR_CTRL_TLBDIS; + __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0), + "i"(ASI_LEON_MMUREGS) : "memory"); +} + +/* macro access for leon_load_reg() and leon_store_reg() */ +#define LEON3_BYPASS_LOAD_PA(x) (leon_load_reg((unsigned long)(x))) +#define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v))) +#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v) +#define LEON3_BYPASS_ORIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v) +#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x)) +#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v)) +#define LEON_REGLOAD_PA(x) leon_load_reg((unsigned long)(x)+LEON_PREGS) +#define LEON_REGSTORE_PA(x, v) leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v)) +#define LEON_REGSTORE_OR_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v)) +#define LEON_REGSTORE_AND_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v)) + +/* macro access for leon_readnobuffer_reg() */ +#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x)) + +extern void sparc_leon_eirq_register(int eirq); +extern void leon_init(void); +extern void leon_switch_mm(void); +extern void leon_init_IRQ(void); + +extern unsigned long last_valid_pfn; + +extern inline unsigned long sparc_leon3_get_dcachecfg(void) +{ + unsigned int retval; + __asm__ __volatile__("lda [%1] %2, %0\n\t" : + "=r"(retval) : + "r"(ASI_LEON3_SYSCTRL_DCFG), + "i"(ASI_LEON3_SYSCTRL)); + return retval; +} + +/* enable snooping */ +extern inline void sparc_leon3_enable_snooping(void) +{ + __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" + "set 0x800000, %%l2\n\t" + "or %%l2, %%l1, %%l2\n\t" + "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2"); +}; + +extern inline void sparc_leon3_disable_cache(void) +{ + __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" + "set 0x00000f, %%l2\n\t" + "andn %%l2, %%l1, %%l2\n\t" + "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2"); +}; + +#endif /*!__ASSEMBLY__*/ + +#ifdef CONFIG_SMP +# define LEON3_IRQ_RESCHEDULE 13 +# define LEON3_IRQ_TICKER (leon_percpu_timer_dev[0].irq) +# define LEON3_IRQ_CROSS_CALL 15 +#endif + +#if defined(PAGE_SIZE_LEON_8K) +#define LEON_PAGE_SIZE_LEON 1 +#elif defined(PAGE_SIZE_LEON_16K) +#define LEON_PAGE_SIZE_LEON 2 +#else +#define LEON_PAGE_SIZE_LEON 0 +#endif + +#if LEON_PAGE_SIZE_LEON == 0 +/* [ 8, 6, 6 ] + 12 */ +#define LEON_PGD_SH 24 +#define LEON_PGD_M 0xff +#define LEON_PMD_SH 18 +#define LEON_PMD_SH_V (LEON_PGD_SH-2) +#define LEON_PMD_M 0x3f +#define LEON_PTE_SH 12 +#define LEON_PTE_M 0x3f +#elif LEON_PAGE_SIZE_LEON == 1 +/* [ 7, 6, 6 ] + 13 */ +#define LEON_PGD_SH 25 +#define LEON_PGD_M 0x7f +#define LEON_PMD_SH 19 +#define
LEON_PMD_SH_V (LEON_PGD_SH-1) +#define LEON_PMD_M 0x3f +#define LEON_PTE_SH 13 +#define LEON_PTE_M 0x3f +#elif LEON_PAGE_SIZE_LEON == 2 +/* [ 6, 6, 6 ] + 14 */ +#define LEON_PGD_SH 26 +#define LEON_PGD_M 0x3f +#define LEON_PMD_SH 20 +#define LEON_PMD_SH_V (LEON_PGD_SH-0) +#define LEON_PMD_M 0x3f +#define LEON_PTE_SH 14 +#define LEON_PTE_M 0x3f +#elif LEON_PAGE_SIZE_LEON == 3 +/* [ 4, 7, 6 ] + 15 */ +#define LEON_PGD_SH 28 +#define LEON_PGD_M 0x0f +#define LEON_PMD_SH 21 +#define LEON_PMD_SH_V (LEON_PGD_SH-0) +#define LEON_PMD_M 0x7f +#define LEON_PTE_SH 15 +#define LEON_PTE_M 0x3f +#else +#error cannot determine LEON_PAGE_SIZE_LEON +#endif + +#define PAGE_MIN_SHIFT (12) +#define PAGE_MIN_SIZE (1UL << PAGE_MIN_SHIFT) + +#define LEON3_XCCR_SETS_MASK 0x07000000UL +#define LEON3_XCCR_SSIZE_MASK 0x00f00000UL + +#define LEON2_CCR_DSETS_MASK 0x03000000UL +#define LEON2_CFG_SSIZE_MASK 0x00007000UL + +#ifndef __ASSEMBLY__ +extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr); +extern void leon_flush_icache_all(void); +extern void leon_flush_dcache_all(void); +extern void leon_flush_cache_all(void); +extern void leon_flush_tlb_all(void); +extern int leon_flush_during_switch; +extern int leon_flush_needed(void); + +struct vm_area_struct; +extern void leon_flush_icache_all(void); +extern void leon_flush_dcache_all(void); +extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page); +extern void leon_flush_cache_all(void); +extern void leon_flush_tlb_all(void); +extern int leon_flush_during_switch; +extern int leon_flush_needed(void); +extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page); + +/* struct that hold LEON3 cache configuration registers */ +struct leon3_cacheregs { + unsigned long ccr; /* 0x00 - Cache Control Register */ + unsigned long iccr; /* 0x08 - Instruction Cache Configuration Register */ + unsigned long dccr; /* 0x0c - Data Cache Configuration Register */ +}; + +/* struct that hold LEON2 cache configuration register + * & configuration register + */ +struct leon2_cacheregs { + unsigned long ccr, cfg; +}; + +#ifdef __KERNEL__ + +#include <linux/interrupt.h> + +struct device_node; +extern int sparc_leon_eirq_get(int eirq, int cpu); +extern irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id); +extern void sparc_leon_eirq_register(int eirq); +extern void leon_clear_clock_irq(void); +extern void leon_load_profile_irq(int cpu, unsigned int limit); +extern void leon_init_timers(irq_handler_t counter_fn); +extern void leon_clear_clock_irq(void); +extern void leon_load_profile_irq(int cpu, unsigned int limit); +extern void leon_trans_init(struct device_node *dp); +extern void leon_node_init(struct device_node *dp, struct device_node ***nextp); +extern void leon_init_IRQ(void); +extern void leon_init(void); +extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr); +extern void init_leon(void); +extern void poke_leonsparc(void); +extern void leon3_getCacheRegs(struct leon3_cacheregs *regs); +extern int leon_flush_needed(void); +extern void leon_switch_mm(void); +extern int srmmu_swprobe_trace; + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +/* macros used in leon_mm.c */ +#define PFN(x) ((x) >> PAGE_SHIFT) +#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base))) +#define _SRMMU_PTE_PMASK_LEON 0xffffffff + +#else /* defined(CONFIG_SPARC_LEON) */ + +/* nop definitions for !LEON case */ +#define leon_init() do {} while (0) +#define leon_switch_mm() do {} while (0) 
+#define leon_init_IRQ() do {} while (0) +#define init_leon() do {} while (0) + +#endif /* !defined(CONFIG_SPARC_LEON) */ + +#endif diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h new file mode 100644 index 000000000000..618e88821795 --- /dev/null +++ b/arch/sparc/include/asm/leon_amba.h @@ -0,0 +1,263 @@ +/* +*Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com), Gaisler Research +*Copyright (C) 2004 Stefan Holst (mail@s-holst.de), Uni-Stuttgart +*Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com),Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB +*/ + +#ifndef LEON_AMBA_H_INCLUDE +#define LEON_AMBA_H_INCLUDE + +#ifndef __ASSEMBLY__ + +struct amba_prom_registers { + unsigned int phys_addr; /* The physical address of this register */ + unsigned int reg_size; /* How many bytes does this register take up? */ +}; + +#endif + +/* + * The following defines the bits in the LEON UART Status Registers. + */ + +#define LEON_REG_UART_STATUS_DR 0x00000001 /* Data Ready */ +#define LEON_REG_UART_STATUS_TSE 0x00000002 /* TX Send Register Empty */ +#define LEON_REG_UART_STATUS_THE 0x00000004 /* TX Hold Register Empty */ +#define LEON_REG_UART_STATUS_BR 0x00000008 /* Break Error */ +#define LEON_REG_UART_STATUS_OE 0x00000010 /* RX Overrun Error */ +#define LEON_REG_UART_STATUS_PE 0x00000020 /* RX Parity Error */ +#define LEON_REG_UART_STATUS_FE 0x00000040 /* RX Framing Error */ +#define LEON_REG_UART_STATUS_ERR 0x00000078 /* Error Mask */ + +/* + * The following defines the bits in the LEON UART Ctrl Registers. + */ + +#define LEON_REG_UART_CTRL_RE 0x00000001 /* Receiver enable */ +#define LEON_REG_UART_CTRL_TE 0x00000002 /* Transmitter enable */ +#define LEON_REG_UART_CTRL_RI 0x00000004 /* Receiver interrupt enable */ +#define LEON_REG_UART_CTRL_TI 0x00000008 /* Transmitter irq */ +#define LEON_REG_UART_CTRL_PS 0x00000010 /* Parity select */ +#define LEON_REG_UART_CTRL_PE 0x00000020 /* Parity enable */ +#define LEON_REG_UART_CTRL_FL 0x00000040 /* Flow control enable */ +#define LEON_REG_UART_CTRL_LB 0x00000080 /* Loop Back enable */ + +#define LEON3_GPTIMER_EN 1 +#define LEON3_GPTIMER_RL 2 +#define LEON3_GPTIMER_LD 4 +#define LEON3_GPTIMER_IRQEN 8 +#define LEON3_GPTIMER_SEPIRQ 8 + +#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ +/* 0 = hold scalar and counter */ +#define LEON23_REG_TIMER_CONTROL_RL 0x00000002 /* 1 = reload at 0 */ + /* 0 = stop at 0 */ +#define LEON23_REG_TIMER_CONTROL_LD 0x00000004 /* 1 = load counter */ + /* 0 = no function */ +#define LEON23_REG_TIMER_CONTROL_IQ 0x00000008 /* 1 = irq enable */ + /* 0 = no function */ + +/* + * The following defines the bits in the LEON PS/2 Status Registers. + */ + +#define LEON_REG_PS2_STATUS_DR 0x00000001 /* Data Ready */ +#define LEON_REG_PS2_STATUS_PE 0x00000002 /* Parity error */ +#define LEON_REG_PS2_STATUS_FE 0x00000004 /* Framing error */ +#define LEON_REG_PS2_STATUS_KI 0x00000008 /* Keyboard inhibit */ +#define LEON_REG_PS2_STATUS_RF 0x00000010 /* RX buffer full */ +#define LEON_REG_PS2_STATUS_TF 0x00000020 /* TX buffer full */ + +/* + * The following defines the bits in the LEON PS/2 Ctrl Registers. 
+ */ + +#define LEON_REG_PS2_CTRL_RE 0x00000001 /* Receiver enable */ +#define LEON_REG_PS2_CTRL_TE 0x00000002 /* Transmitter enable */ +#define LEON_REG_PS2_CTRL_RI 0x00000004 /* Keyboard receive irq */ +#define LEON_REG_PS2_CTRL_TI 0x00000008 /* Keyboard transmit irq */ + +#define LEON3_IRQMPSTATUS_CPUNR 28 +#define LEON3_IRQMPSTATUS_BROADCAST 27 + +#define GPTIMER_CONFIG_IRQNT(a) (((a) >> 3) & 0x1f) +#define GPTIMER_CONFIG_ISSEP(a) ((a) & (1 << 8)) +#define GPTIMER_CONFIG_NTIMERS(a) ((a) & (0x7)) +#define LEON3_GPTIMER_CTRL_PENDING 0x10 +#define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7) +#define LEON3_GPTIMER_CTRL_ISPENDING(r) (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0) + +#ifdef CONFIG_SPARC_LEON + +#ifndef __ASSEMBLY__ + +struct leon3_irqctrl_regs_map { + u32 ilevel; + u32 ipend; + u32 iforce; + u32 iclear; + u32 mpstatus; + u32 mpbroadcast; + u32 notused02; + u32 notused03; + u32 notused10; + u32 notused11; + u32 notused12; + u32 notused13; + u32 notused20; + u32 notused21; + u32 notused22; + u32 notused23; + u32 mask[16]; + u32 force[16]; + /* Extended IRQ registers */ + u32 intid[16]; /* 0xc0 */ +}; + +struct leon3_apbuart_regs_map { + u32 data; + u32 status; + u32 ctrl; + u32 scaler; +}; + +struct leon3_gptimerelem_regs_map { + u32 val; + u32 rld; + u32 ctrl; + u32 unused; +}; + +struct leon3_gptimer_regs_map { + u32 scalar; + u32 scalar_reload; + u32 config; + u32 unused; + struct leon3_gptimerelem_regs_map e[8]; +}; + +/* + * Types and structure used for AMBA Plug & Play bus scanning + */ + +#define AMBA_MAXAPB_DEVS 64 +#define AMBA_MAXAPB_DEVS_PERBUS 16 + +struct amba_device_table { + int devnr; /* number of devices on AHB or APB bus */ + unsigned int *addr[16]; /* addresses to the devices configuration tables */ + unsigned int allocbits[1]; /* 0=unallocated, 1=allocated driver */ +}; + +struct amba_apbslv_device_table { + int devnr; /* number of devices on AHB or APB bus */ + unsigned int *addr[AMBA_MAXAPB_DEVS]; /* addresses to the devices configuration tables */ + unsigned int apbmst[AMBA_MAXAPB_DEVS]; /* apb master if a entry is a apb slave */ + unsigned int apbmstidx[AMBA_MAXAPB_DEVS]; /* apb master idx if a entry is a apb slave */ + unsigned int allocbits[4]; /* 0=unallocated, 1=allocated driver */ +}; + +struct amba_confarea_type { + struct amba_confarea_type *next;/* next bus in chain */ + struct amba_device_table ahbmst; + struct amba_device_table ahbslv; + struct amba_apbslv_device_table apbslv; + unsigned int apbmst; +}; + +/* collect apb slaves */ +struct amba_apb_device { + unsigned int start, irq, bus_id; + struct amba_confarea_type *bus; +}; + +/* collect ahb slaves */ +struct amba_ahb_device { + unsigned int start[4], irq, bus_id; + struct amba_confarea_type *bus; +}; + +struct device_node; +void _amba_init(struct device_node *dp, struct device_node ***nextp); + +extern struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; +extern struct leon3_gptimer_regs_map *leon3_gptimer_regs; +extern struct amba_apb_device leon_percpu_timer_dev[16]; +extern int leondebug_irq_disable; +extern int leon_debug_irqout; +extern unsigned long leon3_gptimer_irq; +extern unsigned int sparc_leon_eirq; + +#endif /* __ASSEMBLY__ */ + +#define LEON3_IO_AREA 0xfff00000 +#define LEON3_CONF_AREA 0xff000 +#define LEON3_AHB_SLAVE_CONF_AREA (1 << 11) + +#define LEON3_AHB_CONF_WORDS 8 +#define LEON3_APB_CONF_WORDS 2 +#define LEON3_AHB_MASTERS 16 +#define LEON3_AHB_SLAVES 16 +#define LEON3_APB_SLAVES 16 +#define LEON3_APBUARTS 8 + +/* Vendor codes */ +#define VENDOR_GAISLER 1 +#define 
VENDOR_PENDER 2 +#define VENDOR_ESA 4 +#define VENDOR_OPENCORES 8 + +/* Gaisler Research device id's */ +#define GAISLER_LEON3 0x003 +#define GAISLER_LEON3DSU 0x004 +#define GAISLER_ETHAHB 0x005 +#define GAISLER_APBMST 0x006 +#define GAISLER_AHBUART 0x007 +#define GAISLER_SRCTRL 0x008 +#define GAISLER_SDCTRL 0x009 +#define GAISLER_APBUART 0x00C +#define GAISLER_IRQMP 0x00D +#define GAISLER_AHBRAM 0x00E +#define GAISLER_GPTIMER 0x011 +#define GAISLER_PCITRG 0x012 +#define GAISLER_PCISBRG 0x013 +#define GAISLER_PCIFBRG 0x014 +#define GAISLER_PCITRACE 0x015 +#define GAISLER_PCIDMA 0x016 +#define GAISLER_AHBTRACE 0x017 +#define GAISLER_ETHDSU 0x018 +#define GAISLER_PIOPORT 0x01A +#define GAISLER_GRGPIO 0x01A +#define GAISLER_AHBJTAG 0x01c +#define GAISLER_ETHMAC 0x01D +#define GAISLER_AHB2AHB 0x020 +#define GAISLER_USBDC 0x021 +#define GAISLER_ATACTRL 0x024 +#define GAISLER_DDRSPA 0x025 +#define GAISLER_USBEHC 0x026 +#define GAISLER_USBUHC 0x027 +#define GAISLER_I2CMST 0x028 +#define GAISLER_SPICTRL 0x02D +#define GAISLER_DDR2SPA 0x02E +#define GAISLER_SPIMCTRL 0x045 +#define GAISLER_LEON4 0x048 +#define GAISLER_LEON4DSU 0x049 +#define GAISLER_AHBSTAT 0x052 +#define GAISLER_FTMCTRL 0x054 +#define GAISLER_KBD 0x060 +#define GAISLER_VGA 0x061 +#define GAISLER_SVGA 0x063 +#define GAISLER_GRSYSMON 0x066 +#define GAISLER_GRACECTRL 0x067 + +#define GAISLER_L2TIME 0xffd /* internal device: leon2 timer */ +#define GAISLER_L2C 0xffe /* internal device: leon2compat */ +#define GAISLER_PLUGPLAY 0xfff /* internal device: plug & play configarea */ + +#define amba_vendor(x) (((x) >> 24) & 0xff) + +#define amba_device(x) (((x) >> 12) & 0xfff) + +#endif /* !defined(CONFIG_SPARC_LEON) */ + +#endif diff --git a/arch/sparc/include/asm/machines.h b/arch/sparc/include/asm/machines.h index c28c2f248794..cd9c099567e4 100644 --- a/arch/sparc/include/asm/machines.h +++ b/arch/sparc/include/asm/machines.h @@ -15,7 +15,7 @@ struct Sun_Machine_Models { /* Current number of machines we know about that has an IDPROM * machtype entry including one entry for the 0x80 OBP machines. 
*/ -#define NUM_SUN_MACHINES 15 +#define NUM_SUN_MACHINES 16 /* The machine type in the idprom area looks like this: * @@ -30,6 +30,7 @@ struct Sun_Machine_Models { #define SM_ARCH_MASK 0xf0 #define SM_SUN4 0x20 +#define M_LEON 0x30 #define SM_SUN4C 0x50 #define SM_SUN4M 0x70 #define SM_SUN4M_OBP 0x80 @@ -41,6 +42,9 @@ struct Sun_Machine_Models { #define SM_4_330 0x03 /* Sun 4/300 series */ #define SM_4_470 0x04 /* Sun 4/400 series */ +/* Leon machines */ +#define M_LEON3_SOC 0x02 /* Leon3 SoC */ + /* Sun4c machines Full Name - PROM NAME */ #define SM_4C_SS1 0x01 /* Sun4c SparcStation 1 - Sun 4/60 */ #define SM_4C_IPC 0x02 /* Sun4c SparcStation IPC - Sun 4/40 */ diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h index fbd546dd4feb..72e6500e7ab0 100644 --- a/arch/sparc/include/asm/nmi.h +++ b/arch/sparc/include/asm/nmi.h @@ -5,6 +5,9 @@ extern int __init nmi_init(void); extern void perfctr_irq(int irq, struct pt_regs *regs); extern void nmi_adjust_hz(unsigned int new_hz); -extern int nmi_usable; +extern atomic_t nmi_active; + +extern void start_nmi_watchdog(void *unused); +extern void stop_nmi_watchdog(void *unused); #endif /* __NMI_H */ diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h index 6e14fd179335..d9c031f9910f 100644 --- a/arch/sparc/include/asm/pci.h +++ b/arch/sparc/include/asm/pci.h @@ -5,4 +5,7 @@ #else #include <asm/pci_32.h> #endif + +#include <asm-generic/pci-dma-compat.h> + #endif diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index 810d9248e23f..e769f668a4b5 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h @@ -30,42 +30,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) */ #define PCI_DMA_BUS_IS_PHYS (0) -#include <asm/scatterlist.h> - struct pci_dev; -/* Allocate and map kernel buffer using consistent mode DMA for a device. - * hwdev should be valid struct pci_dev pointer for PCI devices. - */ -extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle); - -/* Free and unmap a consistent DMA buffer. - * cpu_addr is what was returned from pci_alloc_consistent, - * size must be the same as what as passed into pci_alloc_consistent, - * and likewise dma_addr must be the same as what *dma_addrp was set to. - * - * References to the memory and mappings assosciated with cpu_addr/dma_addr - * past this call are illegal. - */ -extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); - -/* Map a single buffer of the indicated size for DMA in streaming mode. - * The 32-bit bus address to use is returned. - * - * Once the device is given the dma address, the device owns this memory - * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed. - */ -extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction); - -/* Unmap a single streaming mode DMA translation. The dma_addr and size - * must match what was provided for in a previous pci_map_single call. All - * other usages are undefined. - * - * After this call, reads by the cpu to the buffer are guaranteed to see - * whatever the device wrote there. - */ -extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction); - /* pci_unmap_{single,page} is not a nop, thus... 
*/ #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ dma_addr_t ADDR_NAME; @@ -80,69 +46,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ (((PTR)->LEN_NAME) = (VAL)) -/* - * Same as above, only with pages instead of mapped addresses. - */ -extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, - unsigned long offset, size_t size, int direction); -extern void pci_unmap_page(struct pci_dev *hwdev, - dma_addr_t dma_address, size_t size, int direction); - -/* Map a set of buffers described by scatterlist in streaming - * mode for DMA. This is the scather-gather version of the - * above pci_map_single interface. Here the scatter gather list - * elements are each tagged with the appropriate dma address - * and length. They are obtained via sg_dma_{address,length}(SG). - * - * NOTE: An implementation may be able to use a smaller number of - * DMA address/length pairs than there are SG table elements. - * (for example via virtual mapping capabilities) - * The routine returns the number of addr/length pairs actually - * used, at most nents. - * - * Device ownership issues as mentioned above for pci_map_single are - * the same here. - */ -extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction); - -/* Unmap a set of streaming mode DMA translations. - * Again, cpu read rules concerning calls here are the same as for - * pci_unmap_single() above. - */ -extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction); - -/* Make physical memory consistent for a single - * streaming mode DMA translation after a transfer. - * - * If you perform a pci_map_single() but wish to interrogate the - * buffer using the cpu, yet do not wish to teardown the PCI dma - * mapping, you must call this function before doing so. At the - * next point you give the PCI dma address back to the card, you - * must first perform a pci_dma_sync_for_device, and then the device - * again owns the buffer. - */ -extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); -extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction); - -/* Make physical memory consistent for a set of streaming - * mode DMA translations after a transfer. - * - * The same as pci_dma_sync_single_* but for a scatter-gather list, - * same rules and usage. - */ -extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); -extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction); - -/* Return whether the given PCI device DMA address mask can - * be supported properly. For example, if your device can - * only drive the low 24-bits during PCI bus mastering, then - * you would pass 0x00ffffff as the mask to this function. 
- */ -static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) -{ - return 1; -} - #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, @@ -153,14 +56,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #endif -#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0) - -static inline int pci_dma_mapping_error(struct pci_dev *pdev, - dma_addr_t dma_addr) -{ - return (dma_addr == PCI_DMA_ERROR_CODE); -} - struct device_node; extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index a32970888287..b63e51c3c3ee 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -34,37 +34,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) */ #define PCI_DMA_BUS_IS_PHYS (0) -static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, - dma_addr_t *dma_handle) -{ - return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC); -} - -static inline void pci_free_consistent(struct pci_dev *pdev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle); -} - -static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, - size_t size, int direction) -{ - return dma_map_single(&pdev->dev, ptr, size, - (enum dma_data_direction) direction); -} - -static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, - size_t size, int direction) -{ - dma_unmap_single(&pdev->dev, dma_addr, size, - (enum dma_data_direction) direction); -} - -#define pci_map_page(dev, page, off, size, dir) \ - pci_map_single(dev, (page_address(page) + (off)), size, dir) -#define pci_unmap_page(dev,addr,sz,dir) \ - pci_unmap_single(dev,addr,sz,dir) - /* pci_unmap_{single,page} is not a nop, thus... */ #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ dma_addr_t ADDR_NAME; @@ -79,57 +48,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ (((PTR)->LEN_NAME) = (VAL)) -static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, - int nents, int direction) -{ - return dma_map_sg(&pdev->dev, sg, nents, - (enum dma_data_direction) direction); -} - -static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, - int nents, int direction) -{ - dma_unmap_sg(&pdev->dev, sg, nents, - (enum dma_data_direction) direction); -} - -static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, - dma_addr_t dma_handle, - size_t size, int direction) -{ - dma_sync_single_for_cpu(&pdev->dev, dma_handle, size, - (enum dma_data_direction) direction); -} - -static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev, - dma_addr_t dma_handle, - size_t size, int direction) -{ - /* No flushing needed to sync cpu writes to the device. */ -} - -static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, - struct scatterlist *sg, - int nents, int direction) -{ - dma_sync_sg_for_cpu(&pdev->dev, sg, nents, - (enum dma_data_direction) direction); -} - -static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev, - struct scatterlist *sg, - int nelems, int direction) -{ - /* No flushing needed to sync cpu writes to the device. */ -} - -/* Return whether the given PCI device DMA address mask can - * be supported properly. 
For example, if your device can - * only drive the low 24-bits during PCI bus mastering, then - * you would pass 0x00ffffff as the mask to this function. - */ -extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); - /* PCI IOMMU mapping bypass support. */ /* PCI 64-bit addressing works for all slots on all controller @@ -139,12 +57,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) #define PCI64_ADDR_BASE 0xfffc000000000000UL -static inline int pci_dma_mapping_error(struct pci_dev *pdev, - dma_addr_t dma_addr) -{ - return dma_mapping_error(&pdev->dev, dma_addr); -} - #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, diff --git a/arch/sparc/include/asm/perf_counter.h b/arch/sparc/include/asm/perf_counter.h new file mode 100644 index 000000000000..5d7a8ca0e491 --- /dev/null +++ b/arch/sparc/include/asm/perf_counter.h @@ -0,0 +1,14 @@ +#ifndef __ASM_SPARC_PERF_COUNTER_H +#define __ASM_SPARC_PERF_COUNTER_H + +extern void set_perf_counter_pending(void); + +#define PERF_COUNTER_INDEX_OFFSET 0 + +#ifdef CONFIG_PERF_COUNTERS +extern void init_hw_perf_counters(void); +#else +static inline void init_hw_perf_counters(void) { } +#endif + +#endif diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h index 808555fc1d58..1407c07bdade 100644 --- a/arch/sparc/include/asm/pgtsrmmu.h +++ b/arch/sparc/include/asm/pgtsrmmu.h @@ -267,6 +267,7 @@ static inline void srmmu_flush_tlb_page(unsigned long page) } +#ifndef CONFIG_SPARC_LEON static inline unsigned long srmmu_hwprobe(unsigned long vaddr) { unsigned long retval; @@ -278,6 +279,9 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr) return retval; } +#else +#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK) +#endif static inline int srmmu_get_pte (unsigned long addr) diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index be8d7aaeb60d..82a190d7efc1 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h @@ -118,5 +118,8 @@ extern struct device_node *of_console_device; extern char *of_console_path; extern char *of_console_options; +extern void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp); +extern char *build_full_name(struct device_node *dp); + #endif /* __KERNEL__ */ #endif /* _SPARC_PROM_H */ diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h index 982a12f959f4..3a5ae3d12088 100644 --- a/arch/sparc/include/asm/socket.h +++ b/arch/sparc/include/asm/socket.h @@ -29,6 +29,9 @@ #define SO_RCVBUFFORCE 0x100b #define SO_ERROR 0x1007 #define SO_TYPE 0x1008 +#define SO_PROTOCOL 0x1028 +#define SO_DOMAIN 0x1029 + /* Linux specific, keep the same. */ #define SO_NO_CHECK 0x000b diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 46f91ab66a50..857630cff636 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * * Unfortunately this scheme limits us to ~16,000,000 cpus. 
*/ -static inline void __read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(raw_rwlock_t *rw) { register raw_rwlock_t *lp asm("g1"); lp = rw; @@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw) #define __raw_read_lock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - __read_lock(lock); \ + arch_read_lock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void __read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(raw_rwlock_t *rw) { register raw_rwlock_t *lp asm("g1"); lp = rw; @@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw) #define __raw_read_unlock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - __read_unlock(lock); \ + arch_read_unlock(lock); \ local_irq_restore(flags); \ } while(0) @@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (val == 0); } -static inline int __read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(raw_rwlock_t *rw) { register raw_rwlock_t *lp asm("g1"); register int res asm("o0"); @@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw) ({ unsigned long flags; \ int res; \ local_irq_save(flags); \ - res = __read_trylock(lock); \ + res = arch_read_trylock(lock); \ local_irq_restore(flags); \ res; \ }) diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index f6b2b92ad8d2..43e514783582 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ -static void inline __read_lock(raw_rwlock_t *lock) +static void inline arch_read_lock(raw_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock) : "memory"); } -static int inline __read_trylock(raw_rwlock_t *lock) +static int inline arch_read_trylock(raw_rwlock_t *lock) { int tmp1, tmp2; @@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock) return tmp1; } -static void inline __read_unlock(raw_rwlock_t *lock) +static void inline arch_read_unlock(raw_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock) : "memory"); } -static void inline __write_lock(raw_rwlock_t *lock) +static void inline arch_write_lock(raw_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock) : "memory"); } -static void inline __write_unlock(raw_rwlock_t *lock) +static void inline arch_write_unlock(raw_rwlock_t *lock) { __asm__ __volatile__( " stw %%g0, [%0]" @@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock) : "memory"); } -static int inline __write_trylock(raw_rwlock_t *lock) +static int inline arch_write_trylock(raw_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; @@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock) return result; } -#define __raw_read_lock(p) __read_lock(p) -#define __raw_read_lock_flags(p, f) __read_lock(p) -#define __raw_read_trylock(p) __read_trylock(p) -#define __raw_read_unlock(p) __read_unlock(p) -#define __raw_write_lock(p) __write_lock(p) -#define __raw_write_lock_flags(p, f) __write_lock(p) -#define __raw_write_unlock(p) __write_unlock(p) -#define __raw_write_trylock(p) __write_trylock(p) +#define __raw_read_lock(p) arch_read_lock(p) +#define 
__raw_read_lock_flags(p, f) arch_read_lock(p) +#define __raw_read_trylock(p) arch_read_trylock(p) +#define __raw_read_unlock(p) arch_read_unlock(p) +#define __raw_write_lock(p) arch_write_lock(p) +#define __raw_write_lock_flags(p, f) arch_write_lock(p) +#define __raw_write_unlock(p) arch_write_unlock(p) +#define __raw_write_trylock(p) arch_write_trylock(p) #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) #define __raw_write_can_lock(rw) (!(rw)->lock) diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h index 751c8c17f5a0..890036b3689a 100644 --- a/arch/sparc/include/asm/system_32.h +++ b/arch/sparc/include/asm/system_32.h @@ -32,6 +32,7 @@ enum sparc_cpu { sun4u = 0x05, /* V8 ploos ploos */ sun_unknown = 0x06, ap1000 = 0x07, /* almost a sun4m */ + sparc_leon = 0x08, /* Leon SoC */ }; /* Really, userland should not be looking at any of this... */ diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h index 6c077816ab28..25e848f0cad7 100644 --- a/arch/sparc/include/asm/system_64.h +++ b/arch/sparc/include/asm/system_64.h @@ -29,6 +29,10 @@ enum sparc_cpu { /* This cannot ever be a sun4c :) That's just history. */ #define ARCH_SUN4C 0 +extern const char *sparc_cpu_type; +extern const char *sparc_fpu_type; +extern const char *sparc_pmu_type; + extern char reboot_command[]; /* These are here in an effort to more fully work around Spitfire Errata diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h index de671d73baed..09c79a9c8516 100644 --- a/arch/sparc/include/asm/types.h +++ b/arch/sparc/include/asm/types.h @@ -8,9 +8,8 @@ * need to be careful to avoid a name clashes. */ -#if defined(__sparc__) && defined(__arch64__) +#if defined(__sparc__) -/*** SPARC 64 bit ***/ #include <asm-generic/int-ll64.h> #ifndef __ASSEMBLY__ @@ -26,33 +25,21 @@ typedef unsigned short umode_t; /* Dma addresses come in generic and 64-bit flavours. 
*/ typedef u32 dma_addr_t; -typedef u64 dma64_addr_t; -#endif /* __ASSEMBLY__ */ +#if defined(__arch64__) -#endif /* __KERNEL__ */ +/*** SPARC 64 bit ***/ +typedef u64 dma64_addr_t; #else - /*** SPARC 32 bit ***/ -#include <asm-generic/int-ll64.h> - -#ifndef __ASSEMBLY__ - -typedef unsigned short umode_t; - -#endif /* __ASSEMBLY__ */ - -#ifdef __KERNEL__ - -#ifndef __ASSEMBLY__ - -typedef u32 dma_addr_t; typedef u32 dma64_addr_t; +#endif /* defined(__arch64__) */ + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ -#endif /* defined(__sparc__) && defined(__arch64__) */ +#endif /* defined(__sparc__) */ #endif /* defined(_SPARC_TYPES_H) */ diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index a38c03238918..9ea271e19c70 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -7,8 +7,8 @@ #ifdef __KERNEL__ #include <linux/compiler.h> -#include <linux/sched.h> #include <linux/string.h> +#include <linux/thread_info.h> #include <asm/asi.h> #include <asm/system.h> #include <asm/spitfire.h> diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index b2c406de7d4f..706df669f3b8 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -395,8 +395,9 @@ #define __NR_preadv 324 #define __NR_pwritev 325 #define __NR_rt_tgsigqueueinfo 326 +#define __NR_perf_counter_open 327 -#define NR_SYSCALLS 327 +#define NR_SYSCALLS 328 #ifdef __32bit_syscall_numbers__ /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 475ce4696acd..247cc620cee5 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -41,6 +41,8 @@ obj-y += of_device_common.o obj-y += of_device_$(BITS).o obj-$(CONFIG_SPARC64) += prom_irqtrans.o +obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o + obj-$(CONFIG_SPARC64) += reboot.o obj-$(CONFIG_SPARC64) += sysfs.o obj-$(CONFIG_SPARC64) += iommu.o @@ -61,7 +63,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o obj-$(CONFIG_SPARC32) += devres.o devres-y := ../../../kernel/irq/devres.o -obj-$(CONFIG_SPARC32) += dma.o +obj-y += dma.o obj-$(CONFIG_SPARC32_PCI) += pcic.o @@ -101,3 +103,6 @@ obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o obj-$(CONFIG_AUDIT) += audit.o audit--$(CONFIG_AUDIT) := compat_audit.o obj-$(CONFIG_COMPAT) += $(audit--y) + +pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o +obj-$(CONFIG_SPARC64) += $(pc--y) diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index d85c3dc4953a..1446df90ef85 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -312,7 +312,12 @@ void __cpuinit cpu_probe(void) psr = get_psr(); put_psr(psr | PSR_EF); +#ifdef CONFIG_SPARC_LEON + fpu_vers = 7; +#else fpu_vers = ((get_fsr() >> 17) & 0x7); +#endif + put_psr(psr); set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers); diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c index 524c32f97c55..e1ba8ee21b9a 100644 --- a/arch/sparc/kernel/dma.c +++ b/arch/sparc/kernel/dma.c @@ -1,178 +1,13 @@ -/* dma.c: PCI and SBUS DMA accessors for 32-bit sparc. - * - * Copyright (C) 2008 David S. 
Miller <davem@davemloft.net> - */ - #include <linux/kernel.h> #include <linux/module.h> #include <linux/dma-mapping.h> -#include <linux/scatterlist.h> -#include <linux/mm.h> - -#ifdef CONFIG_PCI -#include <linux/pci.h> -#endif +#include <linux/dma-debug.h> -#include "dma.h" +#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15) -int dma_supported(struct device *dev, u64 mask) +static int __init dma_init(void) { -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_dma_supported(to_pci_dev(dev), mask); -#endif + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } -EXPORT_SYMBOL(dma_supported); - -int dma_set_mask(struct device *dev, u64 dma_mask) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_set_dma_mask(to_pci_dev(dev), dma_mask); -#endif - return -EOPNOTSUPP; -} -EXPORT_SYMBOL(dma_set_mask); - -static void *dma32_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle); -#endif - return sbus_alloc_consistent(dev, size, dma_handle); -} - -static void dma32_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_free_consistent(to_pci_dev(dev), size, - cpu_addr, dma_handle); - return; - } -#endif - sbus_free_consistent(dev, size, cpu_addr, dma_handle); -} - -static dma_addr_t dma32_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_map_page(to_pci_dev(dev), page, offset, - size, (int)direction); -#endif - return sbus_map_single(dev, page_address(page) + offset, - size, (int)direction); -} - -static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_unmap_page(to_pci_dev(dev), dma_address, - size, (int)direction); - return; - } -#endif - sbus_unmap_single(dev, dma_address, size, (int)direction); -} - -static int dma32_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction); -#endif - return sbus_map_sg(dev, sg, nents, direction); -} - -void dma32_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction); - return; - } -#endif - sbus_unmap_sg(dev, sg, nents, (int)direction); -} - -static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle, - size, (int)direction); - return; - } -#endif - sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction); -} - -static void dma32_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle, - size, (int)direction); - return; - } -#endif - sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction); -} - -static void 
dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, - nelems, (int)direction); - return; - } -#endif - BUG(); -} - -static void dma32_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, - nelems, (int)direction); - return; - } -#endif - BUG(); -} - -static const struct dma_ops dma32_dma_ops = { - .alloc_coherent = dma32_alloc_coherent, - .free_coherent = dma32_free_coherent, - .map_page = dma32_map_page, - .unmap_page = dma32_unmap_page, - .map_sg = dma32_map_sg, - .unmap_sg = dma32_unmap_sg, - .sync_single_for_cpu = dma32_sync_single_for_cpu, - .sync_single_for_device = dma32_sync_single_for_device, - .sync_sg_for_cpu = dma32_sync_sg_for_cpu, - .sync_sg_for_device = dma32_sync_sg_for_device, -}; - -const struct dma_ops *dma_ops = &dma32_dma_ops; -EXPORT_SYMBOL(dma_ops); +fs_initcall(dma_init); diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h deleted file mode 100644 index f8d8951adb53..000000000000 --- a/arch/sparc/kernel/dma.h +++ /dev/null @@ -1,14 +0,0 @@ -void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp); -void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba); -dma_addr_t sbus_map_single(struct device *dev, void *va, - size_t len, int direction); -void sbus_unmap_single(struct device *dev, dma_addr_t ba, - size_t n, int direction); -int sbus_map_sg(struct device *dev, struct scatterlist *sg, - int n, int direction); -void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, - int n, int direction); -void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, - size_t size, int direction); -void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, - size_t size, int direction); diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index 6b4d8acc4c83..439d82a95ac9 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S @@ -809,6 +809,11 @@ found_version: nop got_prop: +#ifdef CONFIG_SPARC_LEON + /* no cpu-type check is needed, it is a SPARC-LEON */ + ba sun4c_continue_boot + nop +#endif set cputypval, %o2 ldub [%o2 + 0x4], %l1 diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c index 57922f69c3f7..52a15fe2db19 100644 --- a/arch/sparc/kernel/idprom.c +++ b/arch/sparc/kernel/idprom.c @@ -31,6 +31,8 @@ static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = { { .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) }, { .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) }, { .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) }, +/* Now Leon */ +{ .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) }, /* Now, Sun4c's */ { .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) }, { .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) }, diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 0aeaefe696b9..7690cc219ecc 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, unsigned long offset, size_t sz, - enum dma_data_direction direction) + 
enum dma_data_direction direction, + struct dma_attrs *attrs) { struct iommu *iommu; struct strbuf *strbuf; @@ -474,7 +475,8 @@ do_flush_sync: } static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, - size_t sz, enum dma_data_direction direction) + size_t sz, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct iommu *iommu; struct strbuf *strbuf; @@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, } static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) + int nelems, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct scatterlist *s, *outs, *segstart; unsigned long flags, handle, prot, ctx; @@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) } static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) + int nelems, enum dma_data_direction direction, + struct dma_attrs *attrs) { unsigned long flags, ctx; struct scatterlist *sg; @@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, spin_unlock_irqrestore(&iommu->lock, flags); } -static const struct dma_ops sun4u_dma_ops = { +static struct dma_map_ops sun4u_dma_ops = { .alloc_coherent = dma_4u_alloc_coherent, .free_coherent = dma_4u_free_coherent, .map_page = dma_4u_map_page, @@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = { .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, }; -const struct dma_ops *dma_ops = &sun4u_dma_ops; +struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); +extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); + int dma_supported(struct device *dev, u64 device_mask) { struct iommu *iommu = dev->archdata.iommu; @@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask) #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) - return pci_dma_supported(to_pci_dev(dev), device_mask); + return pci64_dma_supported(to_pci_dev(dev), device_mask); #endif return 0; diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 87ea0d03d975..9f61fd8cbb7b 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/pci.h> /* struct pci_dev */ #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/scatterlist.h> #include <linux/of_device.h> @@ -48,8 +49,6 @@ #include <asm/iommu.h> #include <asm/io-unit.h> -#include "dma.h" - #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ static struct resource *_sparc_find_resource(struct resource *r, @@ -246,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64); * Typically devices use them for control blocks. * CPU may access them without any explicit flushing. 
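For orientation, a driver-side sketch of how this consistent memory is reached now that the generic dma_map_ops table below backs the common API; the device and structure names are hypothetical, and on sparc32 SBUS the call resolves to sbus_alloc_coherent():

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct mydev_ctrl {			/* hypothetical control block */
	u32 cmd;
	u32 status;
};

static int mydev_alloc_ctrl(struct device *dev, struct mydev_ctrl **ctrl,
			    dma_addr_t *handle)
{
	/* Dispatches through dma_ops; per the comment above, the CPU
	 * may then access the memory without explicit flushing. */
	*ctrl = dma_alloc_coherent(dev, sizeof(**ctrl), handle, GFP_KERNEL);
	return *ctrl ? 0 : -ENOMEM;
}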
*/ -void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp) +static void *sbus_alloc_coherent(struct device *dev, size_t len, + dma_addr_t *dma_addrp, gfp_t gfp) { struct of_device *op = to_of_device(dev); unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; @@ -299,7 +299,8 @@ err_nopages: return NULL; } -void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) +static void sbus_free_coherent(struct device *dev, size_t n, void *p, + dma_addr_t ba) { struct resource *res; struct page *pgv; @@ -317,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) n = (n + PAGE_SIZE-1) & PAGE_MASK; if ((res->end-res->start)+1 != n) { - printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", + printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", (long)((res->end-res->start)+1), n); return; } @@ -337,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) * CPU view of this memory may be inconsistent with * a device view and explicit flushing is necessary. */ -dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction) +static dma_addr_t sbus_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t len, + enum dma_data_direction dir, + struct dma_attrs *attrs) { + void *va = page_address(page) + offset; + /* XXX why are some lengths signed, others unsigned? */ if (len <= 0) { return 0; @@ -350,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi return mmu_get_scsi_one(dev, va, len); } -void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction) +static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, + enum dma_data_direction dir, struct dma_attrs *attrs) { mmu_release_scsi_one(dev, ba, n); } -int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction) +static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, + enum dma_data_direction dir, struct dma_attrs *attrs) { mmu_get_scsi_sgl(dev, sg, n); @@ -366,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction return n; } -void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction) +static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, + enum dma_data_direction dir, struct dma_attrs *attrs) { mmu_release_scsi_sgl(dev, sg, n); } -void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction) +static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int n, enum dma_data_direction dir) { + BUG(); } -void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction) +static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int n, enum dma_data_direction dir) { + BUG(); } +struct dma_map_ops sbus_dma_ops = { + .alloc_coherent = sbus_alloc_coherent, + .free_coherent = sbus_free_coherent, + .map_page = sbus_map_page, + .unmap_page = sbus_unmap_page, + .map_sg = sbus_map_sg, + .unmap_sg = sbus_unmap_sg, + .sync_sg_for_cpu = sbus_sync_sg_for_cpu, + .sync_sg_for_device = sbus_sync_sg_for_device, +}; + +struct dma_map_ops *dma_ops = &sbus_dma_ops; +EXPORT_SYMBOL(dma_ops); + static int __init sparc_register_ioport(void) { register_proc_sparc_ioport(); @@ -395,7 +422,8 @@ arch_initcall(sparc_register_ioport); /* Allocate and map kernel buffer using consistent mode DMA for a device. 
* hwdev should be valid struct pci_dev pointer for PCI devices. */ -void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) +static void *pci32_alloc_coherent(struct device *dev, size_t len, + dma_addr_t *pba, gfp_t gfp) { unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; unsigned long va; @@ -439,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ return (void *) res->start; } -EXPORT_SYMBOL(pci_alloc_consistent); /* Free and unmap a consistent DMA buffer. * cpu_addr is what was returned from pci_alloc_consistent, @@ -449,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent); * References to the memory and mappings associated with cpu_addr/dma_addr * past this call are illegal. */ -void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) +static void pci32_free_coherent(struct device *dev, size_t n, void *p, + dma_addr_t ba) { struct resource *res; unsigned long pgp; @@ -481,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) free_pages(pgp, get_order(n)); } -EXPORT_SYMBOL(pci_free_consistent); - -/* Map a single buffer of the indicated size for DMA in streaming mode. - * The 32-bit bus address to use is returned. - * - * Once the device is given the dma address, the device owns this memory - * until either pci_unmap_single or pci_dma_sync_single_* is performed. - */ -dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, - int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); - /* IIep is write-through, not flushing. */ - return virt_to_phys(ptr); -} -EXPORT_SYMBOL(pci_map_single); - -/* Unmap a single streaming mode DMA translation. The dma_addr and size - * must match what was provided for in a previous pci_map_single call. All - * other usages are undefined. - * - * After this call, reads by the cpu to the buffer are guaranteed to see - * whatever the device wrote there. - */ -void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, - int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { - mmu_inval_dma_area((unsigned long)phys_to_virt(ba), - (size + PAGE_SIZE-1) & PAGE_MASK); - } -} -EXPORT_SYMBOL(pci_unmap_single); /* * Same as pci_map_single, but with pages. */ -dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, - unsigned long offset, size_t size, int direction) +static dma_addr_t pci32_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) { - BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ return page_to_phys(page) + offset; } -EXPORT_SYMBOL(pci_map_page); - -void pci_unmap_page(struct pci_dev *hwdev, - dma_addr_t dma_address, size_t size, int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); - /* mmu_inval_dma_area XXX */ -} -EXPORT_SYMBOL(pci_unmap_page); /* Map a set of buffers described by scatterlist in streaming * mode for DMA. This is the scather-gather version of the @@ -551,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page); * Device ownership issues as mentioned above for pci_map_single are * the same here. 
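As a hedged illustration of these scatter-gather rules (driver names hypothetical; on this bus dma_map_sg() lands in the pci32_map_sg() shown below):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int mydev_xmit_sg(struct device *dev, struct scatterlist *sgl,
			 int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (mapped == 0)
		return -EIO;

	/* The device owns the buffers now; program the hardware with
	 * sg_dma_address()/sg_dma_len() of each mapped entry. */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}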
*/ -int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, - int direction) +static int pci32_map_sg(struct device *device, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) { struct scatterlist *sg; int n; - BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); @@ -566,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, } return nents; } -EXPORT_SYMBOL(pci_map_sg); /* Unmap a set of streaming mode DMA translations. * Again, cpu read rules concerning calls here are the same as for * pci_unmap_single() above. */ -void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, - int direction) +static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) { struct scatterlist *sg; int n; - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); mmu_inval_dma_area( @@ -588,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, } } } -EXPORT_SYMBOL(pci_unmap_sg); /* Make physical memory consistent for a single * streaming mode DMA translation before or after a transfer. @@ -600,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg); * must first perform a pci_dma_sync_for_device, and then the * device again owns the buffer. */ -void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) +static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba, + size_t size, enum dma_data_direction dir) { - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); } } -EXPORT_SYMBOL(pci_dma_sync_single_for_cpu); -void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) +static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba, + size_t size, enum dma_data_direction dir) { - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); } } -EXPORT_SYMBOL(pci_dma_sync_single_for_device); /* Make physical memory consistent for a set of streaming * mode DMA translations after a transfer. @@ -626,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device); * The same as pci_dma_sync_single_* but for a scatter-gather list, * same rules and usage. 
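A short sketch of the ownership handshake those rules imply (illustrative only; on IIep the sync hooks below invalidate via mmu_inval_dma_area()):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void mydev_inspect_rx(struct device *dev, struct scatterlist *sgl,
			     int nents)
{
	/* Hand the buffers to the CPU before reading them. */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);

	/* ... CPU examines the received data here ... */

	/* Return ownership to the device for the next transfer. */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
}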
*/ -void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) +static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) { struct scatterlist *sg; int n; - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); mmu_inval_dma_area( @@ -641,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int } } } -EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu); -void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) +static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) { struct scatterlist *sg; int n; - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); mmu_inval_dma_area( @@ -658,31 +639,78 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, } } } -EXPORT_SYMBOL(pci_dma_sync_sg_for_device); + +struct dma_map_ops pci32_dma_ops = { + .alloc_coherent = pci32_alloc_coherent, + .free_coherent = pci32_free_coherent, + .map_page = pci32_map_page, + .map_sg = pci32_map_sg, + .unmap_sg = pci32_unmap_sg, + .sync_single_for_cpu = pci32_sync_single_for_cpu, + .sync_single_for_device = pci32_sync_single_for_device, + .sync_sg_for_cpu = pci32_sync_sg_for_cpu, + .sync_sg_for_device = pci32_sync_sg_for_device, +}; +EXPORT_SYMBOL(pci32_dma_ops); + #endif /* CONFIG_PCI */ +/* + * Return whether the given PCI device DMA address mask can be + * supported properly. For example, if your device can only drive the + * low 24-bits during PCI bus mastering, then you would pass + * 0x00ffffff as the mask to this function. 
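Concretely, a sketch of a hypothetical 24-bit-limited device negotiating its mask with the two helpers defined next:

#include <linux/dma-mapping.h>

static int mydev_probe_mask(struct device *dev)
{
	u64 mask = DMA_BIT_MASK(24);	/* 0x00ffffff */

	if (!dma_supported(dev, mask))
		return -EIO;

	return dma_set_mask(dev, mask);
}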
+ */ +int dma_supported(struct device *dev, u64 mask) +{ +#ifdef CONFIG_PCI + if (dev->bus == &pci_bus_type) + return 1; +#endif + return 0; +} +EXPORT_SYMBOL(dma_supported); + +int dma_set_mask(struct device *dev, u64 dma_mask) +{ +#ifdef CONFIG_PCI + if (dev->bus == &pci_bus_type) + return pci_set_dma_mask(to_pci_dev(dev), dma_mask); +#endif + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(dma_set_mask); + + #ifdef CONFIG_PROC_FS -static int -_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof, - void *data) +static int sparc_io_proc_show(struct seq_file *m, void *v) { - char *p = buf, *e = buf + length; - struct resource *r; + struct resource *root = m->private, *r; const char *nm; - for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { - if (p + 32 >= e) /* Better than nothing */ - break; + for (r = root->child; r != NULL; r = r->sibling) { if ((nm = r->name) == 0) nm = "???"; - p += sprintf(p, "%016llx-%016llx: %s\n", + seq_printf(m, "%016llx-%016llx: %s\n", (unsigned long long)r->start, (unsigned long long)r->end, nm); } - return p-buf; + return 0; } +static int sparc_io_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, sparc_io_proc_show, PDE(inode)->data); +} + +static const struct file_operations sparc_io_proc_fops = { + .owner = THIS_MODULE, + .open = sparc_io_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; #endif /* CONFIG_PROC_FS */ /* @@ -707,7 +735,7 @@ static struct resource *_sparc_find_resource(struct resource *root, static void register_proc_sparc_ioport(void) { #ifdef CONFIG_PROC_FS - create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap); - create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma); + proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap); + proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma); #endif } diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c index ad800b80c718..e1af43728329 100644 --- a/arch/sparc/kernel/irq_32.c +++ b/arch/sparc/kernel/irq_32.c @@ -45,6 +45,7 @@ #include <asm/pcic.h> #include <asm/cacheflush.h> #include <asm/irq_regs.h> +#include <asm/leon.h> #include "kernel.h" #include "irq.h" @@ -661,6 +662,10 @@ void __init init_IRQ(void) sun4d_init_IRQ(); break; + case sparc_leon: + leon_init_IRQ(); + break; + default: prom_printf("Cannot initialize IRQs on this Sun machine..."); break; diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c new file mode 100644 index 000000000000..54d8a5bd4824 --- /dev/null +++ b/arch/sparc/kernel/leon_kernel.c @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB + * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/interrupt.h> +#include <linux/of_device.h> +#include <asm/oplib.h> +#include <asm/timer.h> +#include <asm/prom.h> +#include <asm/leon.h> +#include <asm/leon_amba.h> + +#include "prom.h" +#include "irq.h" + +struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address, initialized by amba_init() */ +struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address, initialized by amba_init() */ +struct amba_apb_device leon_percpu_timer_dev[16]; + +int 
leondebug_irq_disable; +int leon_debug_irqout; +static int dummy_master_l10_counter; + +unsigned long leon3_gptimer_irq; /* interrupt controller irq number, initialized by amba_init() */ +unsigned int sparc_leon_eirq; +#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0])) + +/* Return the IRQ of the pending IRQ on the extended IRQ controller */ +int sparc_leon_eirq_get(int eirq, int cpu) +{ + return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f; +} + +irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id) +{ + printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n"); + return IRQ_HANDLED; +} + +/* The extended IRQ controller has been found, this function registers it */ +void sparc_leon_eirq_register(int eirq) +{ + int irq; + + /* Register a "BAD" handler for this interrupt, it should never happen */ + irq = request_irq(eirq, sparc_leon_eirq_isr, + (IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL); + + if (irq) { + printk(KERN_ERR + "sparc_leon_eirq_register: unable to attach IRQ%d\n", + eirq); + } else { + sparc_leon_eirq = eirq; + } + +} + +static inline unsigned long get_irqmask(unsigned int irq) +{ + unsigned long mask; + + if (!irq || ((irq > 0xf) && !sparc_leon_eirq) + || ((irq > 0x1f) && sparc_leon_eirq)) { + printk(KERN_ERR + "leon_get_irqmask: false irq number: %d\n", irq); + mask = 0; + } else { + mask = LEON_HARD_INT(irq); + } + return mask; +} + +static void leon_enable_irq(unsigned int irq_nr) +{ + unsigned long mask, flags; + mask = get_irqmask(irq_nr); + local_irq_save(flags); + LEON3_BYPASS_STORE_PA(LEON_IMASK, + (LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask))); + local_irq_restore(flags); +} + +static void leon_disable_irq(unsigned int irq_nr) +{ + unsigned long mask, flags; + mask = get_irqmask(irq_nr); + local_irq_save(flags); + LEON3_BYPASS_STORE_PA(LEON_IMASK, + (LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask))); + local_irq_restore(flags); + +} + +void __init leon_init_timers(irq_handler_t counter_fn) +{ + int irq; + + leondebug_irq_disable = 0; + leon_debug_irqout = 0; + master_l10_counter = (unsigned int *)&dummy_master_l10_counter; + dummy_master_l10_counter = 0; + + if (leon3_gptimer_regs && leon3_irqctrl_regs) { + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, + (((1000000 / 100) - 1))); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); + + } else { + printk(KERN_ERR "No Timer/irqctrl found\n"); + BUG(); + } + + irq = request_irq(leon3_gptimer_irq, + counter_fn, + (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); + + if (irq) { + printk(KERN_ERR "leon_time_init: unable to attach IRQ%d\n", + LEON_INTERRUPT_TIMER1); + prom_halt(); + } + + if (leon3_gptimer_regs) { + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, + LEON3_GPTIMER_EN | + LEON3_GPTIMER_RL | + LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); + } +} + +void leon_clear_clock_irq(void) +{ +} + +void leon_load_profile_irq(int cpu, unsigned int limit) +{ + BUG(); +} + + + + +void __init leon_trans_init(struct device_node *dp) +{ + if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) { + struct property *p; + p = of_find_property(dp, "mid", (void *)0); + if (p) { + int mid; + dp->name = prom_early_alloc(5 + 1); + memcpy(&mid, p->value, p->length); + sprintf((char *)dp->name, "cpu%.2d", mid); + } + } +} + +void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = 0; + +void __init leon_node_init(struct device_node *dp, struct device_node ***nextp) +{ + if (prom_amba_init && + 
strcmp(dp->type, "ambapp") == 0 && + strcmp(dp->name, "ambapp0") == 0) { + prom_amba_init(dp, nextp); + } +} + +void __init leon_init_IRQ(void) +{ + sparc_init_timers = leon_init_timers; + + BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq, + BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(load_profile_irq, leon_load_profile_irq, + BTFIXUPCALL_NOP); + +#ifdef CONFIG_SMP + BTFIXUPSET_CALL(set_cpu_int, leon_set_cpu_int, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(clear_cpu_int, leon_clear_ipi, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(set_irq_udt, leon_set_udt, BTFIXUPCALL_NORM); +#endif + +} + +void __init leon_init(void) +{ + prom_build_more = &leon_node_init; +} diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index b75bf502cd42..378eb53e0776 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -19,6 +19,7 @@ #include <linux/delay.h> #include <linux/smp.h> +#include <asm/perf_counter.h> #include <asm/ptrace.h> #include <asm/local.h> #include <asm/pcr.h> @@ -31,13 +32,19 @@ * level 14 as our IRQ off level. */ -static int nmi_watchdog_active; static int panic_on_timeout; -int nmi_usable; -EXPORT_SYMBOL_GPL(nmi_usable); +/* nmi_active: + * >0: the NMI watchdog is active, but can be disabled + * <0: the NMI watchdog has not been set up, and cannot be enabled + * 0: the NMI watchdog is disabled, but can be enabled + */ +atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ +EXPORT_SYMBOL(nmi_active); static unsigned int nmi_hz = HZ; +static DEFINE_PER_CPU(short, wd_enabled); +static int endflag __initdata; static DEFINE_PER_CPU(unsigned int, last_irq_sum); static DEFINE_PER_CPU(local_t, alert_counter); @@ -45,7 +52,7 @@ static DEFINE_PER_CPU(int, nmi_touch); void touch_nmi_watchdog(void) { - if (nmi_watchdog_active) { + if (atomic_read(&nmi_active)) { int cpu; for_each_present_cpu(cpu) { @@ -78,6 +85,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) if (do_panic || panic_on_oops) panic("Non maskable interrupt"); + nmi_exit(); local_irq_enable(); do_exit(SIGBUS); } @@ -92,6 +100,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) local_cpu_data().__nmi_count++; + nmi_enter(); + if (notify_die(DIE_NMI, "nmi", regs, 0, pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) touched = 1; @@ -110,10 +120,12 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) __get_cpu_var(last_irq_sum) = sum; local_set(&__get_cpu_var(alert_counter), 0); } - if (nmi_usable) { + if (__get_cpu_var(wd_enabled)) { write_pic(picl_value(nmi_hz)); pcr_ops->write(pcr_enable); } + + nmi_exit(); } static inline unsigned int get_nmi_count(int cpu) @@ -121,8 +133,6 @@ static inline unsigned int get_nmi_count(int cpu) return cpu_data(cpu).__nmi_count; } -static int endflag __initdata; - static __init void nmi_cpu_busy(void *data) { local_irq_enable_in_hardirq(); @@ -143,12 +153,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count) printk(KERN_WARNING "and attach the output of the 'dmesg' command.\n"); - nmi_usable = 0; + per_cpu(wd_enabled, cpu) = 0; + atomic_dec(&nmi_active); } -static void stop_watchdog(void *unused) +void stop_nmi_watchdog(void *unused) { pcr_ops->write(PCR_PIC_PRIV); + __get_cpu_var(wd_enabled) = 0; + atomic_dec(&nmi_active); } static int __init 
check_nmi_watchdog(void) @@ -156,6 +169,9 @@ static int __init check_nmi_watchdog(void) unsigned int *prev_nmi_count; int cpu, err; + if (!atomic_read(&nmi_active)) + return 0; + prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); if (!prev_nmi_count) { err = -ENOMEM; @@ -172,12 +188,15 @@ static int __init check_nmi_watchdog(void) mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ for_each_online_cpu(cpu) { + if (!per_cpu(wd_enabled, cpu)) + continue; if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) report_broken_nmi(cpu, prev_nmi_count); } endflag = 1; - if (!nmi_usable) { + if (!atomic_read(&nmi_active)) { kfree(prev_nmi_count); + atomic_set(&nmi_active, -1); err = -ENODEV; goto error; } @@ -188,12 +207,26 @@ static int __init check_nmi_watchdog(void) kfree(prev_nmi_count); return 0; error: - on_each_cpu(stop_watchdog, NULL, 1); + on_each_cpu(stop_nmi_watchdog, NULL, 1); return err; } -static void start_watchdog(void *unused) +void start_nmi_watchdog(void *unused) { + __get_cpu_var(wd_enabled) = 1; + atomic_inc(&nmi_active); + + pcr_ops->write(PCR_PIC_PRIV); + write_pic(picl_value(nmi_hz)); + + pcr_ops->write(pcr_enable); +} + +static void nmi_adjust_hz_one(void *unused) +{ + if (!__get_cpu_var(wd_enabled)) + return; + pcr_ops->write(PCR_PIC_PRIV); write_pic(picl_value(nmi_hz)); @@ -203,13 +236,13 @@ static void start_watchdog(void *unused) void nmi_adjust_hz(unsigned int new_hz) { nmi_hz = new_hz; - on_each_cpu(start_watchdog, NULL, 1); + on_each_cpu(nmi_adjust_hz_one, NULL, 1); } EXPORT_SYMBOL_GPL(nmi_adjust_hz); static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) { - on_each_cpu(stop_watchdog, NULL, 1); + on_each_cpu(stop_nmi_watchdog, NULL, 1); return 0; } @@ -221,18 +254,19 @@ int __init nmi_init(void) { int err; - nmi_usable = 1; - - on_each_cpu(start_watchdog, NULL, 1); + on_each_cpu(start_nmi_watchdog, NULL, 1); err = check_nmi_watchdog(); if (!err) { err = register_reboot_notifier(&nmi_reboot_notifier); if (err) { - nmi_usable = 0; - on_each_cpu(stop_watchdog, NULL, 1); + on_each_cpu(stop_nmi_watchdog, NULL, 1); + atomic_set(&nmi_active, -1); } } + if (!err) + init_hw_perf_counters(); + return err; } diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 90396702ea2c..4c26eb59e742 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -9,6 +9,8 @@ #include <linux/irq.h> #include <linux/of_device.h> #include <linux/of_platform.h> +#include <asm/leon.h> +#include <asm/leon_amba.h> #include "of_device_common.h" @@ -97,6 +99,35 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) return IORESOURCE_MEM; } + /* + * AMBAPP bus specific translator + */ + +static int of_bus_ambapp_match(struct device_node *np) +{ + return !strcmp(np->name, "ambapp"); +} + +static void of_bus_ambapp_count_cells(struct device_node *child, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = 1; + if (sizec) + *sizec = 1; +} + +static int of_bus_ambapp_map(u32 *addr, const u32 *range, + int na, int ns, int pna) +{ + return of_bus_default_map(addr, range, na, ns, pna); +} + +static unsigned long of_bus_ambapp_get_flags(const u32 *addr, + unsigned long flags) +{ + return IORESOURCE_MEM; +} /* * Array of bus specific translators @@ -121,6 +152,15 @@ static struct of_bus of_busses[] = { .map = of_bus_default_map, .get_flags = of_bus_sbus_get_flags, }, + /* AMBA */ + { + .name = "ambapp", + .addr_prop_name = "reg", + .match = of_bus_ambapp_match, + .count_cells = 
of_bus_ambapp_count_cells, + .map = of_bus_ambapp_map, + .get_flags = of_bus_ambapp_get_flags, + }, /* Default */ { .name = "default", diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 57859ad23547..c68648662802 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) pci_dev_put(ali_isa_bridge); } -int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) +int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask) { u64 dma_addr_mask; diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 2485eaa23101..23c33ff9c31e 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, unsigned long offset, size_t sz, - enum dma_data_direction direction) + enum dma_data_direction direction, + struct dma_attrs *attrs) { struct iommu *iommu; unsigned long flags, npages, oaddr; @@ -296,7 +297,8 @@ iommu_map_fail: } static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, - size_t sz, enum dma_data_direction direction) + size_t sz, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct pci_pbm_info *pbm; struct iommu *iommu; @@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, } static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) + int nelems, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct scatterlist *s, *outs, *segstart; unsigned long flags, handle, prot; @@ -478,7 +481,8 @@ iommu_map_failed: } static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) + int nelems, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct pci_pbm_info *pbm; struct scatterlist *sg; @@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, spin_unlock_irqrestore(&iommu->lock, flags); } -static void dma_4v_sync_single_for_cpu(struct device *dev, - dma_addr_t bus_addr, size_t sz, - enum dma_data_direction direction) -{ - /* Nothing to do... */ -} - -static void dma_4v_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sglist, int nelems, - enum dma_data_direction direction) -{ - /* Nothing to do... 
*/ -} - -static const struct dma_ops sun4v_dma_ops = { +static struct dma_map_ops sun4v_dma_ops = { .alloc_coherent = dma_4v_alloc_coherent, .free_coherent = dma_4v_free_coherent, .map_page = dma_4v_map_page, .unmap_page = dma_4v_unmap_page, .map_sg = dma_4v_map_sg, .unmap_sg = dma_4v_unmap_sg, - .sync_single_for_cpu = dma_4v_sync_single_for_cpu, - .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu, }; static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 1ae8cdd7e703..68ff00107073 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -7,6 +7,8 @@ #include <linux/init.h> #include <linux/irq.h> +#include <linux/perf_counter.h> + #include <asm/pil.h> #include <asm/pcr.h> #include <asm/nmi.h> @@ -34,10 +36,20 @@ unsigned int picl_shift; */ void deferred_pcr_work_irq(int irq, struct pt_regs *regs) { + struct pt_regs *old_regs; + clear_softint(1 << PIL_DEFERRED_PCR_WORK); + + old_regs = set_irq_regs(regs); + irq_enter(); +#ifdef CONFIG_PERF_COUNTERS + perf_counter_do_pending(); +#endif + irq_exit(); + set_irq_regs(old_regs); } -void schedule_deferred_pcr_work(void) +void set_perf_counter_pending(void) { set_softint(1 << PIL_DEFERRED_PCR_WORK); } diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_counter.c new file mode 100644 index 000000000000..09de4035eaa9 --- /dev/null +++ b/arch/sparc/kernel/perf_counter.c @@ -0,0 +1,557 @@ +/* Performance counter support for sparc64. + * + * Copyright (C) 2009 David S. Miller <davem@davemloft.net> + * + * This code is based almost entirely upon the x86 perf counter + * code, which is: + * + * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2009 Jaswinder Singh Rajput + * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + */ + +#include <linux/perf_counter.h> +#include <linux/kprobes.h> +#include <linux/kernel.h> +#include <linux/kdebug.h> +#include <linux/mutex.h> + +#include <asm/cpudata.h> +#include <asm/atomic.h> +#include <asm/nmi.h> +#include <asm/pcr.h> + +/* Sparc64 chips have two performance counters, 32-bits each, with + * overflow interrupts generated on transition from 0xffffffff to 0. + * The counters are accessed in one go using a 64-bit register. + * + * Both counters are controlled using a single control register. The + * only way to stop all sampling is to clear all of the context (user, + * supervisor, hypervisor) sampling enable bits. But these bits apply + * to both counters, thus the two counters can't be enabled/disabled + * individually. + * + * The control register has two event fields, one for each of the two + * counters. It's thus nearly impossible to have one counter going + * while keeping the other one stopped. Therefore it is possible to + * get overflow interrupts for counters not currently "in use" and + * that condition must be checked in the overflow interrupt handler. + * + * So we use a hack, in that we program inactive counters with the + * "sw_count0" and "sw_count1" events. These count how many times + * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an + * unusual way to encode a NOP and therefore will not trigger in + * normal code. 
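To make that layout concrete, a minimal sketch (the helper names are illustrative, not part of this driver):

#include <linux/types.h>

/* One 64-bit %pic register holds two independent 32-bit counters;
 * each wraps from 0xffffffff to 0 and raises an overflow interrupt. */
static inline u32 pic_upper_count(u64 pic)
{
	return pic >> 32;		/* the PIC_UPPER counter */
}

static inline u32 pic_lower_count(u64 pic)
{
	return (u32)pic;		/* the PIC_LOWER counter */
}

/* The parking event counts executions of
 *	sethi	%hi(0xfc000), %g0
 * which writes only %g0 and is therefore an architectural no-op. */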
+ */ + +#define MAX_HWCOUNTERS 2 +#define MAX_PERIOD ((1UL << 32) - 1) + +#define PIC_UPPER_INDEX 0 +#define PIC_LOWER_INDEX 1 + +struct cpu_hw_counters { + struct perf_counter *counters[MAX_HWCOUNTERS]; + unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; + unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; + int enabled; +}; +DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, }; + +struct perf_event_map { + u16 encoding; + u8 pic_mask; +#define PIC_NONE 0x00 +#define PIC_UPPER 0x01 +#define PIC_LOWER 0x02 +}; + +struct sparc_pmu { + const struct perf_event_map *(*event_map)(int); + int max_events; + int upper_shift; + int lower_shift; + int event_mask; + int hv_bit; + int irq_bit; + int upper_nop; + int lower_nop; +}; + +static const struct perf_event_map ultra3i_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, +}; + +static const struct perf_event_map *ultra3i_event_map(int event) +{ + return &ultra3i_perfmon_event_map[event]; +} + +static const struct sparc_pmu ultra3i_pmu = { + .event_map = ultra3i_event_map, + .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), + .upper_shift = 11, + .lower_shift = 4, + .event_mask = 0x3f, + .upper_nop = 0x1c, + .lower_nop = 0x14, +}; + +static const struct perf_event_map niagara2_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, +}; + +static const struct perf_event_map *niagara2_event_map(int event) +{ + return &niagara2_perfmon_event_map[event]; +} + +static const struct sparc_pmu niagara2_pmu = { + .event_map = niagara2_event_map, + .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), + .upper_shift = 19, + .lower_shift = 6, + .event_mask = 0xfff, + .hv_bit = 0x8, + .irq_bit = 0x03, + .upper_nop = 0x220, + .lower_nop = 0x220, +}; + +static const struct sparc_pmu *sparc_pmu __read_mostly; + +static u64 event_encoding(u64 event, int idx) +{ + if (idx == PIC_UPPER_INDEX) + event <<= sparc_pmu->upper_shift; + else + event <<= sparc_pmu->lower_shift; + return event; +} + +static u64 mask_for_index(int idx) +{ + return event_encoding(sparc_pmu->event_mask, idx); +} + +static u64 nop_for_index(int idx) +{ + return event_encoding(idx == PIC_UPPER_INDEX ? 
+ sparc_pmu->upper_nop : + sparc_pmu->lower_nop, idx); +} + +static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc, + int idx) +{ + u64 val, mask = mask_for_index(idx); + + val = pcr_ops->read(); + pcr_ops->write((val & ~mask) | hwc->config); +} + +static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc, + int idx) +{ + u64 mask = mask_for_index(idx); + u64 nop = nop_for_index(idx); + u64 val = pcr_ops->read(); + + pcr_ops->write((val & ~mask) | nop); +} + +void hw_perf_enable(void) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + u64 val; + int i; + + if (cpuc->enabled) + return; + + cpuc->enabled = 1; + barrier(); + + val = pcr_ops->read(); + + for (i = 0; i < MAX_HWCOUNTERS; i++) { + struct perf_counter *cp = cpuc->counters[i]; + struct hw_perf_counter *hwc; + + if (!cp) + continue; + hwc = &cp->hw; + val |= hwc->config_base; + } + + pcr_ops->write(val); +} + +void hw_perf_disable(void) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + u64 val; + + if (!cpuc->enabled) + return; + + cpuc->enabled = 0; + + val = pcr_ops->read(); + val &= ~(PCR_UTRACE | PCR_STRACE | + sparc_pmu->hv_bit | sparc_pmu->irq_bit); + pcr_ops->write(val); +} + +static u32 read_pmc(int idx) +{ + u64 val; + + read_pic(val); + if (idx == PIC_UPPER_INDEX) + val >>= 32; + + return val & 0xffffffff; +} + +static void write_pmc(int idx, u64 val) +{ + u64 shift, mask, pic; + + shift = 0; + if (idx == PIC_UPPER_INDEX) + shift = 32; + + mask = ((u64) 0xffffffff) << shift; + val <<= shift; + + read_pic(pic); + pic &= ~mask; + pic |= val; + write_pic(pic); +} + +static int sparc_perf_counter_set_period(struct perf_counter *counter, + struct hw_perf_counter *hwc, int idx) +{ + s64 left = atomic64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + if (left > MAX_PERIOD) + left = MAX_PERIOD; + + atomic64_set(&hwc->prev_count, (u64)-left); + + write_pmc(idx, (u64)(-left) & 0xffffffff); + + perf_counter_update_userpage(counter); + + return ret; +} + +static int sparc_pmu_enable(struct perf_counter *counter) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct hw_perf_counter *hwc = &counter->hw; + int idx = hwc->idx; + + if (test_and_set_bit(idx, cpuc->used_mask)) + return -EAGAIN; + + sparc_pmu_disable_counter(hwc, idx); + + cpuc->counters[idx] = counter; + set_bit(idx, cpuc->active_mask); + + sparc_perf_counter_set_period(counter, hwc, idx); + sparc_pmu_enable_counter(hwc, idx); + perf_counter_update_userpage(counter); + return 0; +} + +static u64 sparc_perf_counter_update(struct perf_counter *counter, + struct hw_perf_counter *hwc, int idx) +{ + int shift = 64 - 32; + u64 prev_raw_count, new_raw_count; + s64 delta; + +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + new_raw_count = read_pmc(idx); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + atomic64_add(delta, &counter->count); + atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static void sparc_pmu_disable(struct perf_counter *counter) +{ + struct cpu_hw_counters *cpuc = 
&__get_cpu_var(cpu_hw_counters); + struct hw_perf_counter *hwc = &counter->hw; + int idx = hwc->idx; + + clear_bit(idx, cpuc->active_mask); + sparc_pmu_disable_counter(hwc, idx); + + barrier(); + + sparc_perf_counter_update(counter, hwc, idx); + cpuc->counters[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_counter_update_userpage(counter); +} + +static void sparc_pmu_read(struct perf_counter *counter) +{ + struct hw_perf_counter *hwc = &counter->hw; + sparc_perf_counter_update(counter, hwc, hwc->idx); +} + +static void sparc_pmu_unthrottle(struct perf_counter *counter) +{ + struct hw_perf_counter *hwc = &counter->hw; + sparc_pmu_enable_counter(hwc, hwc->idx); +} + +static atomic_t active_counters = ATOMIC_INIT(0); +static DEFINE_MUTEX(pmc_grab_mutex); + +void perf_counter_grab_pmc(void) +{ + if (atomic_inc_not_zero(&active_counters)) + return; + + mutex_lock(&pmc_grab_mutex); + if (atomic_read(&active_counters) == 0) { + if (atomic_read(&nmi_active) > 0) { + on_each_cpu(stop_nmi_watchdog, NULL, 1); + BUG_ON(atomic_read(&nmi_active) != 0); + } + atomic_inc(&active_counters); + } + mutex_unlock(&pmc_grab_mutex); +} + +void perf_counter_release_pmc(void) +{ + if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) { + if (atomic_read(&nmi_active) == 0) + on_each_cpu(start_nmi_watchdog, NULL, 1); + mutex_unlock(&pmc_grab_mutex); + } +} + +static void hw_perf_counter_destroy(struct perf_counter *counter) +{ + perf_counter_release_pmc(); +} + +static int __hw_perf_counter_init(struct perf_counter *counter) +{ + struct perf_counter_attr *attr = &counter->attr; + struct hw_perf_counter *hwc = &counter->hw; + const struct perf_event_map *pmap; + u64 enc; + + if (atomic_read(&nmi_active) < 0) + return -ENODEV; + + if (attr->type != PERF_TYPE_HARDWARE) + return -EOPNOTSUPP; + + if (attr->config >= sparc_pmu->max_events) + return -EINVAL; + + perf_counter_grab_pmc(); + counter->destroy = hw_perf_counter_destroy; + + /* We save the enable bits in the config_base. So to + * turn off sampling just write 'config', and to enable + * things write 'config | config_base'. 
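Spelled out as a sketch (reusing this file's pcr_ops and hw_perf_counter, so no extra includes are implied):

static void sketch_pause_sampling(struct hw_perf_counter *hwc)
{
	/* Event encoding stays programmed; trace-enable bits drop out. */
	pcr_ops->write(hwc->config);
}

static void sketch_resume_sampling(struct hw_perf_counter *hwc)
{
	/* OR the saved user/supervisor/hypervisor enable bits back in. */
	pcr_ops->write(hwc->config | hwc->config_base);
}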
+ */ + hwc->config_base = sparc_pmu->irq_bit; + if (!attr->exclude_user) + hwc->config_base |= PCR_UTRACE; + if (!attr->exclude_kernel) + hwc->config_base |= PCR_STRACE; + if (!attr->exclude_hv) + hwc->config_base |= sparc_pmu->hv_bit; + + if (!hwc->sample_period) { + hwc->sample_period = MAX_PERIOD; + hwc->last_period = hwc->sample_period; + atomic64_set(&hwc->period_left, hwc->sample_period); + } + + pmap = sparc_pmu->event_map(attr->config); + + enc = pmap->encoding; + if (pmap->pic_mask & PIC_UPPER) { + hwc->idx = PIC_UPPER_INDEX; + enc <<= sparc_pmu->upper_shift; + } else { + hwc->idx = PIC_LOWER_INDEX; + enc <<= sparc_pmu->lower_shift; + } + + hwc->config |= enc; + return 0; +} + +static const struct pmu pmu = { + .enable = sparc_pmu_enable, + .disable = sparc_pmu_disable, + .read = sparc_pmu_read, + .unthrottle = sparc_pmu_unthrottle, +}; + +const struct pmu *hw_perf_counter_init(struct perf_counter *counter) +{ + int err = __hw_perf_counter_init(counter); + + if (err) + return ERR_PTR(err); + return &pmu; +} + +void perf_counter_print_debug(void) +{ + unsigned long flags; + u64 pcr, pic; + int cpu; + + if (!sparc_pmu) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + pcr = pcr_ops->read(); + read_pic(pic); + + pr_info("\n"); + pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n", + cpu, pcr, pic); + + local_irq_restore(flags); +} + +static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, + unsigned long cmd, void *__args) +{ + struct die_args *args = __args; + struct perf_sample_data data; + struct cpu_hw_counters *cpuc; + struct pt_regs *regs; + int idx; + + if (!atomic_read(&active_counters)) + return NOTIFY_DONE; + + switch (cmd) { + case DIE_NMI: + break; + + default: + return NOTIFY_DONE; + } + + regs = args->regs; + + data.regs = regs; + data.addr = 0; + + cpuc = &__get_cpu_var(cpu_hw_counters); + for (idx = 0; idx < MAX_HWCOUNTERS; idx++) { + struct perf_counter *counter = cpuc->counters[idx]; + struct hw_perf_counter *hwc; + u64 val; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + hwc = &counter->hw; + val = sparc_perf_counter_update(counter, hwc, idx); + if (val & (1ULL << 31)) + continue; + + data.period = counter->hw.last_period; + if (!sparc_perf_counter_set_period(counter, hwc, idx)) + continue; + + if (perf_counter_overflow(counter, 1, &data)) + sparc_pmu_disable_counter(hwc, idx); + } + + return NOTIFY_STOP; +} + +static __read_mostly struct notifier_block perf_counter_nmi_notifier = { + .notifier_call = perf_counter_nmi_handler, +}; + +static bool __init supported_pmu(void) +{ + if (!strcmp(sparc_pmu_type, "ultra3i")) { + sparc_pmu = &ultra3i_pmu; + return true; + } + if (!strcmp(sparc_pmu_type, "niagara2")) { + sparc_pmu = &niagara2_pmu; + return true; + } + return false; +} + +void __init init_hw_perf_counters(void) +{ + pr_info("Performance counters: "); + + if (!supported_pmu()) { + pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); + return; + } + + pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); + + /* All sparc64 PMUs currently have 2 counters. But this simple + * driver only supports one active counter at a time. 
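From userspace the counter is reached through sys_perf_counter_open, which the syscall-table hunks later in this diff wire up; a hedged sketch, assuming the updated unistd.h exports __NR_perf_counter_open:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

int main(void)
{
	struct perf_counter_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid 0 = this task, cpu -1 = any, no group, no flags */
	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... workload to be measured runs here ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", count);
	close(fd);
	return 0;
}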
+ */ + perf_max_counters = 1; + + register_die_notifier(&perf_counter_nmi_notifier); +} diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 4041f94e7724..18d67854a1b8 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp) } } -void __trigger_all_cpu_backtrace(void) +void arch_trigger_all_cpu_backtrace(void) { struct thread_info *tp = current_thread_info(); struct pt_regs *regs = get_irq_regs(); @@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void) static void sysrq_handle_globreg(int key, struct tty_struct *tty) { - __trigger_all_cpu_backtrace(); + arch_trigger_all_cpu_backtrace(); } static struct sysrq_key_op sparc_globalreg_op = { diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index fe43e80772db..0a37e8cfd160 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -24,6 +24,8 @@ #include <asm/prom.h> #include <asm/oplib.h> +#include <asm/leon.h> +#include <asm/leon_amba.h> #include "prom.h" @@ -131,6 +133,35 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) regs->which_io, regs->phys_addr); } +/* "name:vendor:device@irq,addrlo" */ +static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) +{ + struct amba_prom_registers *regs; unsigned int *intr; + unsigned int *device, *vendor; + struct property *prop; + + prop = of_find_property(dp, "reg", NULL); + if (!prop) + return; + regs = prop->value; + prop = of_find_property(dp, "interrupts", NULL); + if (!prop) + return; + intr = prop->value; + prop = of_find_property(dp, "vendor", NULL); + if (!prop) + return; + vendor = prop->value; + prop = of_find_property(dp, "device", NULL); + if (!prop) + return; + device = prop->value; + + sprintf(tmp_buf, "%s:%d:%d@%x,%x", + dp->name, *vendor, *device, + *intr, regs->phys_addr); +} + static void __init __build_path_component(struct device_node *dp, char *tmp_buf) { struct device_node *parent = dp->parent; @@ -143,6 +174,8 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf) return sbus_path_component(dp, tmp_buf); if (!strcmp(parent->type, "ebus")) return ebus_path_component(dp, tmp_buf); + if (!strcmp(parent->type, "ambapp")) + return ambapp_path_component(dp, tmp_buf); /* "isa" is handled with platform naming */ } diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index 0fb5789d43c8..138910c67206 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c @@ -22,9 +22,12 @@ #include <linux/of.h> #include <asm/prom.h> #include <asm/oplib.h> +#include <asm/leon.h> #include "prom.h" +void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp); + struct device_node *of_console_device; EXPORT_SYMBOL(of_console_device); @@ -161,7 +164,7 @@ static struct property * __init build_one_prop(phandle node, char *prev, name = prom_nextprop(node, prev, p->name); } - if (strlen(name) == 0) { + if (!name || strlen(name) == 0) { tmp = p; return NULL; } @@ -242,7 +245,7 @@ static struct device_node * __init prom_create_node(phandle node, return dp; } -static char * __init build_full_name(struct device_node *dp) +char * __init build_full_name(struct device_node *dp) { int len, ourlen, plen; char *n; @@ -289,6 +292,9 @@ static struct device_node * __init prom_build_tree(struct device_node *parent, dp->child = prom_build_tree(dp, prom_getchild(node), nextp); + if (prom_build_more) + 
prom_build_more(dp, nextp); + node = prom_getsibling(node); } diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 998cadb4e7f2..16a47ffe03c1 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -235,6 +235,8 @@ void __init setup_arch(char **cmdline_p) sparc_cpu_model = sun4e; if (!strcmp(&cputypval,"sun4u")) sparc_cpu_model = sun4u; + if (!strncmp(&cputypval, "leon" , 4)) + sparc_cpu_model = sparc_leon; printk("ARCH: "); switch(sparc_cpu_model) { @@ -256,6 +258,9 @@ void __init setup_arch(char **cmdline_p) case sun4u: printk("SUN4U\n"); break; + case sparc_leon: + printk("LEON\n"); + break; default: printk("UNKNOWN!\n"); break; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 181d069a2d44..7ce1a1005b1d 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); + if (current->replacement_session_keyring) + key_replace_session_keyring(); } } diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index ec82d76dc6f2..647afbda7ae1 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); + if (current->replacement_session_keyring) + key_replace_session_keyring(); } } + diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 3691907a43b4..ff68373ce6d6 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1389,8 +1389,8 @@ void smp_send_stop(void) * RETURNS: * Pointer to the allocated area on success, NULL on failure. 
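For context, what this machinery ultimately serves is ordinary per-CPU data; a minimal sketch with a hypothetical variable, resolved through the offsets computed below once pcpu_embed_first_chunk() has run:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, mydev_hits);	/* hypothetical */

static void mydev_count_hit(void)
{
	/* Compiles down to a %g5-relative access on sparc64. */
	__get_cpu_var(mydev_hits)++;
}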
*/ -static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, - unsigned long align) +static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size, + size_t align) { const unsigned long goal = __pa(MAX_DMA_ADDRESS); #ifdef CONFIG_NEED_MULTIPLE_NODES @@ -1415,127 +1415,35 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, #endif } -static size_t pcpur_size __initdata; -static void **pcpur_ptrs __initdata; - -static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) +static void __init pcpu_free_bootmem(void *ptr, size_t size) { - size_t off = (size_t)pageno << PAGE_SHIFT; - - if (off >= pcpur_size) - return NULL; - - return virt_to_page(pcpur_ptrs[cpu] + off); + free_bootmem(__pa(ptr), size); } -#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL) - -static void __init pcpu_map_range(unsigned long start, unsigned long end, - struct page *page) +static int pcpu_cpu_distance(unsigned int from, unsigned int to) { - unsigned long pfn = page_to_pfn(page); - unsigned long pte_base; - - BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL)); - - pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | - _PAGE_CP_4U | _PAGE_CV_4U | - _PAGE_P_4U | _PAGE_W_4U); - if (tlb_type == hypervisor) - pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | - _PAGE_CP_4V | _PAGE_CV_4V | - _PAGE_P_4V | _PAGE_W_4V); - - while (start < end) { - pgd_t *pgd = pgd_offset_k(start); - unsigned long this_end; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - pud = pud_offset(pgd, start); - if (pud_none(*pud)) { - pmd_t *new; - - new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); - pud_populate(&init_mm, pud, new); - } - - pmd = pmd_offset(pud, start); - if (!pmd_present(*pmd)) { - pte_t *new; - - new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); - pmd_populate_kernel(&init_mm, pmd, new); - } - - pte = pte_offset_kernel(pmd, start); - this_end = (start + PMD_SIZE) & PMD_MASK; - if (this_end > end) - this_end = end; - - while (start < this_end) { - unsigned long paddr = pfn << PAGE_SHIFT; - - pte_val(*pte) = (paddr | pte_base); - - start += PAGE_SIZE; - pte++; - pfn++; - } - } + if (cpu_to_node(from) == cpu_to_node(to)) + return LOCAL_DISTANCE; + else + return REMOTE_DISTANCE; } void __init setup_per_cpu_areas(void) { - size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start; - static struct vm_struct vm; - unsigned long delta, cpu; - size_t pcpu_unit_size; - size_t ptrs_size; - - pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + - PERCPU_DYNAMIC_RESERVE); - dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE; - + unsigned long delta; + unsigned int cpu; + int rc; - ptrs_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpur_ptrs[0])); - pcpur_ptrs = alloc_bootmem(ptrs_size); - - for_each_possible_cpu(cpu) { - pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE, - PCPU_CHUNK_SIZE); - - free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size), - PCPU_CHUNK_SIZE - pcpur_size); - - memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size); - } - - /* allocate address and map */ - vm.flags = VM_ALLOC; - vm.size = nr_cpu_ids * PCPU_CHUNK_SIZE; - vm_area_register_early(&vm, PCPU_CHUNK_SIZE); - - for_each_possible_cpu(cpu) { - unsigned long start = (unsigned long) vm.addr; - unsigned long end; - - start += cpu * PCPU_CHUNK_SIZE; - end = start + PCPU_CHUNK_SIZE; - pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu])); - } - - pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size, - PERCPU_MODULE_RESERVE, dyn_size, - PCPU_CHUNK_SIZE, vm.addr, NULL); - - 
free_bootmem(__pa(pcpur_ptrs), ptrs_size); + rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + PERCPU_DYNAMIC_RESERVE, 4 << 20, + pcpu_cpu_distance, pcpu_alloc_bootmem, + pcpu_free_bootmem); + if (rc) + panic("failed to initialize first chunk (%d)", rc); delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; - for_each_possible_cpu(cpu) { - __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; - } + for_each_possible_cpu(cpu) + __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; /* Setup %g5 for the boot cpu. */ __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index aed94869ad6a..e7061138c98a 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S @@ -121,7 +121,7 @@ SIGN2(sys32_syslog, sys_syslog, %o0, %o2) SIGN1(sys32_umask, sys_umask, %o0) SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2) SIGN1(sys32_sendto, sys_sendto, %o0) -SIGN1(sys32_recvfrom, sys_recvfrom, %o0) +SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2) SIGN2(sys32_connect, sys_connect, %o0, %o2) SIGN2(sys32_bind, sys_bind, %o0, %o2) diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index d28f496f4669..ca39c606fe8e 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c @@ -2,6 +2,7 @@ * * Copyright (C) 2007 David S. Miller <davem@davemloft.net> */ +#include <linux/sched.h> #include <linux/sysdev.h> #include <linux/cpu.h> #include <linux/smp.h> diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 690901657291..04181577cb65 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -82,5 +82,5 @@ sys_call_table: /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv -/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo +/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 2ee7250ba7ae..91b06b7f7acf 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -83,7 +83,7 @@ sys_call_table32: /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv - .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo + .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open #endif /* CONFIG_COMPAT */ @@ -158,4 +158,4 @@ sys_call_table: /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv - .word sys_pwritev, sys_rt_tgsigqueueinfo + .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index fcbbd000ec08..866390feb683 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -171,12 +171,8 @@ SECTIONS } _end = . 
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index aed94869ad6a..e7061138c98a 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -121,7 +121,7 @@ SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
 SIGN1(sys32_umask, sys_umask, %o0)
 SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
 SIGN1(sys32_sendto, sys_sendto, %o0)
-SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
+SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
 SIGN2(sys32_connect, sys_connect, %o0, %o2)
 SIGN2(sys32_bind, sys_bind, %o0, %o2)
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index d28f496f4669..ca39c606fe8e 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
  */
+#include <linux/sched.h>
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 690901657291..04181577cb65 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -82,5 +82,5 @@ sys_call_table:
 /*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 /*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 2ee7250ba7ae..91b06b7f7acf 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -83,7 +83,7 @@ sys_call_table32:
 /*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
 	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
-	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open
 
 #endif /* CONFIG_COMPAT */
 
@@ -158,4 +158,4 @@ sys_call_table:
 /*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-	.word sys_pwritev, sys_rt_tgsigqueueinfo
+	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
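In all three tables the new entry lands directly after sys_rt_tgsigqueueinfo, making perf_counter_open system call number 327 on sparc (325 is pwritev, 326 is rt_tgsigqueueinfo). A hypothetical userspace probe for the slot; the number is hardcoded from the tables above, and the EFAULT-versus-ENOSYS distinction is an assumption about how the entry rejects a NULL attribute pointer:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/*
 * Hypothetical probe, not part of the patch: invoke the new slot with
 * a NULL attr pointer.  A kernel carrying the entry should fail the
 * copy of the attribute struct (EFAULT); an older kernel has no such
 * entry and returns ENOSYS.
 */
#define NR_perf_counter_open	327	/* sparc-specific slot number */

int main(void)
{
	long rc = syscall(NR_perf_counter_open, NULL, 0, -1, -1, 0UL);

	printf("rc=%ld, errno=%d => entry %s\n", rc, errno,
	       errno == ENOSYS ? "missing" : "present");
	return 0;
}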
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index fcbbd000ec08..866390feb683 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -171,12 +171,8 @@ SECTIONS
 	}
 	_end = . ;
-	/DISCARD/ : {
-		EXIT_TEXT
-		EXIT_DATA
-		*(.exitcall.exit)
-	}
-
 	STABS_DEBUG
 	DWARF_DEBUG
+
+	DISCARDS
 }
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 681abe0a4594..79836a7dd00c 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_SPARC32)   += loadmmu.o
 obj-y                   += generic_$(BITS).o
 obj-$(CONFIG_SPARC32)   += extable.o btfixup.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
+obj-$(CONFIG_SPARC_LEON) += leon_mm.o
 
 # Only used by sparc64
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 26bb3919ff1f..54114ad0bdee 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -34,6 +34,7 @@
 #include <asm/pgalloc.h>	/* bug in asm-generic/tlb.h: check_pgt_cache */
 #include <asm/tlb.h>
 #include <asm/prom.h>
+#include <asm/leon.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -326,6 +327,9 @@ void __init paging_init(void)
 		sparc_unmapped_base = 0xe0000000;
 		BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
 		break;
+	case sparc_leon:
+		leon_init();
+		/* fall through */
 	case sun4m:
 	case sun4d:
 		srmmu_paging_init();
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
new file mode 100644
index 000000000000..c0e01297e64e
--- /dev/null
+++ b/arch/sparc/mm/leon_mm.c
@@ -0,0 +1,260 @@
+/*
+ *  linux/arch/sparc/mm/leon_mm.c
+ *
+ * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
+ * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
+ * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
+ *
+ * Do the SRMMU probe in software.
+ *
+ */

+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/asi.h>
+#include <asm/leon.h>
+#include <asm/tlbflush.h>
+
+int leon_flush_during_switch = 1;
+int srmmu_swprobe_trace;
+
+unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
+{
+
+	unsigned int ctxtbl;
+	unsigned int pgd, pmd, ped;
+	unsigned int ptr;
+	unsigned int lvl, pte, paddrbase;
+	unsigned int ctx;
+	unsigned int paddr_calc;
+
+	paddrbase = 0;
+
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: trace on\n");
+
+	ctxtbl = srmmu_get_ctable_ptr();
+	if (!(ctxtbl)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n");
+		return 0;
+	}
+	if (!_pfn_valid(PFN(ctxtbl))) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO
+			       "swprobe: !_pfn_valid(%x)=>0\n",
+			       PFN(ctxtbl));
+		return 0;
+	}
+
+	ctx = srmmu_get_context();
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx);
+
+	pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));
+
+	if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: pgd is entry level 3\n");
+		lvl = 3;
+		pte = pgd;
+		paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
+		goto ready;
+	}
+	if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
+		return 0;
+	}
+
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd);
+
+	ptr = (pgd & SRMMU_PTD_PMASK) << 4;
+	ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
+	if (!_pfn_valid(PFN(ptr)))
+		return 0;
+
+	pmd = LEON_BYPASS_LOAD_PA(ptr);
+	if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: pmd is entry level 2\n");
+		lvl = 2;
+		pte = pmd;
+		paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
+		goto ready;
+	}
+	if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
+		return 0;
+	}
+
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd);
+
+	ptr = (pmd & SRMMU_PTD_PMASK) << 4;
+	ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
+	if (!_pfn_valid(PFN(ptr))) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
+			       PFN(ptr));
+		return 0;
+	}
+
+	ped = LEON_BYPASS_LOAD_PA(ptr);
+
+	if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: ped is entry level 1\n");
+		lvl = 1;
+		pte = ped;
+		paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
+		goto ready;
+	}
+	if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: ped is invalid => 0\n");
+		return 0;
+	}
+
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped);
+
+	ptr = (ped & SRMMU_PTD_PMASK) << 4;
+	ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
+	if (!_pfn_valid(PFN(ptr)))
+		return 0;
+
+	ptr = LEON_BYPASS_LOAD_PA(ptr);
+	if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: ptr is entry level 0\n");
+		lvl = 0;
+		pte = ptr;
+		paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
+		goto ready;
+	}
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
+	return 0;
+
+ready:
+	switch (lvl) {
+	case 0:
+		paddr_calc =
+		    (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
+		break;
+	case 1:
+		paddr_calc =
+		    (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
+		break;
+	case 2:
+		paddr_calc =
+		    (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
+		break;
+	default:
+	case 3:
+		paddr_calc = vaddr;
+		break;
+	}
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: paddr %x\n", paddr_calc);
+	if (paddr)
+		*paddr = paddr_calc;
+	return paddrbase;
+}
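At the ready: label the physical address is assembled from two parts: offset bits below the matched level's shift come from the virtual address, the rest from the table entry. A userspace sketch of that arithmetic (the shift values 12/18/24 are assumptions based on the standard SRMMU geometry of 4k pages, 256k level-2 regions and 16M level-1 regions; the real constants live in asm/leon.h):

#include <stdio.h>

/*
 * Sketch of srmmu_swprobe()'s "ready:" arithmetic, not kernel code.
 * An SRMMU PTE keeps the physical page number in bits 31:8, so
 * (pte & ~0xff) << 4 rebuilds the (32-bit truncated) physical base;
 * ((1u << sh) - 1) is the same mask the kernel writes ~(-1 << sh).
 */
static unsigned int paddr_calc(unsigned int vaddr, unsigned int pte, int lvl)
{
	static const int shift[3] = { 12, 18, 24 };	/* assumed PTE/PMD/PGD */

	if (lvl < 0 || lvl > 2)
		return vaddr;	/* level 3: PTE sits in the context table */

	return (vaddr & ((1u << shift[lvl]) - 1)) | ((pte & ~0xffu) << 4);
}

int main(void)
{
	/* 4k mapping: PTE 0x00040c1e => base 0x0040c000, offset 0x234 */
	printf("%#x\n", paddr_calc(0xf0001234, 0x00040c1e, 0));
	return 0;
}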
+
+void leon_flush_icache_all(void)
+{
+	__asm__ __volatile__(" flush ");	/* iflush */
+}
+
+void leon_flush_dcache_all(void)
+{
+	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
+			     "i"(ASI_LEON_DFLUSH) : "memory");
+}
+
+void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
+{
+	if (vma->vm_flags & VM_EXEC)
+		leon_flush_icache_all();
+	leon_flush_dcache_all();
+}
+
+void leon_flush_cache_all(void)
+{
+	__asm__ __volatile__(" flush ");	/* iflush */
+	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
+			     "i"(ASI_LEON_DFLUSH) : "memory");
+}
+
+void leon_flush_tlb_all(void)
+{
+	leon_flush_cache_all();
+	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
+			     "i"(ASI_LEON_MMUFLUSH) : "memory");
+}
+
+/* get all cache regs */
+void leon3_getCacheRegs(struct leon3_cacheregs *regs)
+{
+	unsigned long ccr, iccr, dccr;
+
+	if (!regs)
+		return;
+	/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
+	__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
+			     "mov 0x08, %%g1\n\t"
+			     "lda [%%g1] %3, %1\n\t"
+			     "mov 0x0c, %%g1\n\t"
+			     "lda [%%g1] %3, %2\n\t"
+			     : "=r"(ccr), "=r"(iccr), "=r"(dccr)
+			     /* output */
+			     : "i"(ASI_LEON_CACHEREGS)	/* input */
+			     : "g1"	/* clobber list */
+	    );
+	regs->ccr = ccr;
+	regs->iccr = iccr;
+	regs->dccr = dccr;
+}
+
+/* Because the cache is virtually indexed, we examine the cache
+ * configuration to determine whether flushing on context switch
+ * can be skipped.
+ *
+ * LEON2 and LEON3 report their cache configuration differently.
+ *
+ */
+int leon_flush_needed(void)
+{
+	int flush_needed = -1;
+	unsigned int ssize, sets;
+	char *setStr[4] =
+	    { "direct mapped", "2-way associative", "3-way associative",
+		"4-way associative"
+	};
+	/* leon 3 */
+	struct leon3_cacheregs cregs;
+	leon3_getCacheRegs(&cregs);
+	sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
+	/* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
+	ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
+
+	printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
+	       sets > 3 ? "unknown" : setStr[sets], ssize);
+	if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
+		/* Set size <= page size ==>
+		   no flush needed on context switch. */
+		flush_needed = 0;
+		printk(KERN_INFO "CACHE: not flushing on every context switch\n");
+	}
+	return flush_needed;
+}
+
+void leon_switch_mm(void)
+{
+	flush_tlb_mm((void *)0);
+	if (leon_flush_during_switch)
+		leon_flush_cache_all();
+}
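leon_flush_needed() reduces to a single predicate: a direct-mapped data cache whose set size fits in one page cannot alias virtual indexes, so the flush on context switch can be skipped. A standalone sketch of that decision, including the DCCR field decoding (the mask values are assumptions that merely mirror the >>24 and >>20 shifts above; PAGE_SIZE is fixed at sparc32's 4k for the example):

#include <stdio.h>

/*
 * Userspace sketch of leon_flush_needed()'s logic, not kernel code.
 * Assumed masks: sets in DCCR bits 25:24, set-size exponent in 23:20.
 */
#define XCCR_SETS_MASK	0x03000000UL
#define XCCR_SSIZE_MASK	0x00f00000UL
#define PAGE_SIZE	4096

static int flush_needed(unsigned long dccr)
{
	unsigned int sets = (dccr & XCCR_SETS_MASK) >> 24;	/* 0 = direct mapped */
	unsigned int ssize = 1 << ((dccr & XCCR_SSIZE_MASK) >> 20);	/* in kB */

	return !(ssize <= PAGE_SIZE / 1024 && sets == 0);
}

int main(void)
{
	printf("direct mapped, 2k sets: flush %s\n",
	       flush_needed(0x00100000) ? "needed" : "skipped");
	printf("2-way, 8k sets:         flush %s\n",
	       flush_needed(0x01300000) ? "needed" : "skipped");
	return 0;
}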
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
index 652be05acbea..82ec8f666036 100644
--- a/arch/sparc/mm/loadmmu.c
+++ b/arch/sparc/mm/loadmmu.c
@@ -33,6 +33,7 @@ void __init load_mmu(void)
 		break;
 	case sun4m:
 	case sun4d:
+	case sparc_leon:
 		ld_mmu_srmmu();
 		break;
 	default:
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index ade4eb373bdd..509b1ffeba66 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -46,6 +46,7 @@
 #include <asm/tsunami.h>
 #include <asm/swift.h>
 #include <asm/turbosparc.h>
+#include <asm/leon.h>
 #include <asm/btfixup.h>
 
@@ -569,6 +570,9 @@ static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
 		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
 	}
 
+	if (sparc_cpu_model == sparc_leon)
+		leon_switch_mm();
+
 	if (is_hypersparc)
 		hyper_flush_whole_icache();
 
@@ -1977,6 +1981,45 @@ static void __init init_viking(void)
 	poke_srmmu = poke_viking;
 }
 
+#ifdef CONFIG_SPARC_LEON
+
+void __init poke_leonsparc(void)
+{
+}
+
+void __init init_leon(void)
+{
+
+	srmmu_name = "Leon";
+
+	BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
+			BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
+			BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
+			BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
+			BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
+			BTFIXUPCALL_NORM);
+
+	BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
+
+	BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
+			BTFIXUPCALL_NOP);
+	BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
+
+	poke_srmmu = poke_leonsparc;
+
+	srmmu_cache_pagetables = 0;
+
+	leon_flush_during_switch = leon_flush_needed();
+}
+#endif
+
 /* Probe for the srmmu chip version. */
 static void __init get_srmmu_type(void)
 {
@@ -1992,7 +2035,15 @@ static void __init get_srmmu_type(void)
 	psr_typ = (psr >> 28) & 0xf;
 	psr_vers = (psr >> 24) & 0xf;
 
-	/* First, check for HyperSparc or Cypress. */
+	/* First, check for sparc-leon. */
+	if (sparc_cpu_model == sparc_leon) {
+		psr_typ = 0xf;	/* hardcoded ids for older models/simulators */
+		psr_vers = 2;
+		init_leon();
+		return;
+	}
+
+	/* Second, check for HyperSparc or Cypress. */
 	if(mod_typ == 1) {
 		switch(mod_rev) {
 		case 7:
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index d172f86439b1..f97cb8b6ee5f 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -21,7 +21,7 @@ static int profile_timer_exceptions_notify(struct notifier_block *self,
 					   unsigned long val, void *data)
 {
-	struct die_args *args = (struct die_args *)data;
+	struct die_args *args = data;
 	int ret = NOTIFY_DONE;
 
 	switch (val) {
@@ -57,7 +57,7 @@ static void timer_stop(void)
 
 static int op_nmi_timer_init(struct oprofile_operations *ops)
 {
-	if (!nmi_usable)
+	if (atomic_read(&nmi_active) <= 0)
 		return -ENODEV;
 
 	ops->start = timer_start;
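The op_nmi_timer_init() gate changes from a boolean to a count: the NMI-driven oprofile timer backend is only offered while at least one CPU has the NMI watchdog running. A toy model of that refcount pattern (the semantics of nmi_active here are an assumption; the kernel keeps it as an atomic_t updated from the sparc64 NMI watchdog code):

#include <stdio.h>

/*
 * Toy model, not the kernel's implementation: a count of CPUs with the
 * NMI watchdog armed replaces a single nmi_usable flag, so per-cpu
 * start/stop paths can adjust it independently (the kernel would use
 * atomic_inc/atomic_dec on an atomic_t).
 */
static int nmi_active;	/* > 0: NMIs running, <= 0: backend unavailable */

static void cpu_nmi_start(void) { nmi_active++; }
static void cpu_nmi_stop(void)  { nmi_active--; }

static int op_nmi_timer_usable(void)
{
	return nmi_active > 0;	/* mirrors: atomic_read(&nmi_active) <= 0 => -ENODEV */
}

int main(void)
{
	printf("before: %d\n", op_nmi_timer_usable());
	cpu_nmi_start();
	cpu_nmi_start();	/* two cpus armed */
	printf("armed:  %d\n", op_nmi_timer_usable());
	cpu_nmi_stop();
	cpu_nmi_stop();
	printf("after:  %d\n", op_nmi_timer_usable());
	return 0;
}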