author | Arnd Bergmann <arnd@arndb.de> | 2011-12-27 22:04:51 +0000 |
---|---|---|
committer | Arnd Bergmann <arnd@arndb.de> | 2011-12-27 22:05:06 +0000 |
commit | 07b98403ee67838bbaded43bd687875b9d7f74e0 (patch) | |
tree | 0b1f155ae4628a2be4dc4dd4c7fbeeaf1d8016dc | |
parent | b17471f5d121a53be1ccf6e0b0599441e56b468c (diff) | |
parent | f4ebf1d1f8d10b703493e76300605e8be2f21bf5 (diff) | |
Merge branch 'omap/hwmod' into next/drivers
This is needed as a dependency for omap/ehci.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
379 files changed, 4193 insertions, 2748 deletions
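
The hwmod branch merged here registers, among other things, the usb_host_hs and usb_tll_hs modules that the later omap/ehci work depends on. As a rough sketch only (assuming the 3.2-era omap_hwmod API, with omap_hwmod_lookup() declared in <plat/omap_hwmod.h>; the helper name usbhs_hwmods_present() is hypothetical and not part of this merge), platform code could confirm those hwmods are registered like this:

```c
/*
 * Minimal sketch, not taken from this merge: it only illustrates looking up
 * the "usb_host_hs" and "usb_tll_hs" hwmods that the hwmod data below
 * registers, before an EHCI/OHCI platform device would be built.
 * Assumes the 3.2-era omap_hwmod API; the helper name is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <plat/omap_hwmod.h>

static int __init usbhs_hwmods_present(void)
{
	struct omap_hwmod *uhh, *tll;

	/* Names match the .name fields in the hwmod data added by this merge */
	uhh = omap_hwmod_lookup("usb_host_hs");
	tll = omap_hwmod_lookup("usb_tll_hs");
	if (!uhh || !tll) {
		pr_err("usbhs: usb_host_hs/usb_tll_hs hwmods not registered\n");
		return -ENODEV;
	}

	return 0;
}
```

This roughly mirrors what the mach-omap2 USB host setup code does when it looks these hwmods up by name before building the host controller device.
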
@@ -688,10 +688,13 @@ S: Oxfordshire, UK. N: Kees Cook E: kees@outflux.net -W: http://outflux.net/ -P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30 1975 1FFF 4BA9 1706 3E6D -D: Minor updates to SCSI types, added /proc/pid/maps protection +E: kees@ubuntu.com +E: keescook@chromium.org +W: http://outflux.net/blog/ +P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E 6E13 8972 F4DF DC6D C026 +D: Various security things, bug fixes, and documentation. S: (ask for current address) +S: Portland, Oregon S: USA N: Robin Cornelius diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index a0c5c5f4fce6..81c287fad79d 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -315,12 +315,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. CPU-intensive style benchmark, and it can vary highly in a microbenchmark depending on workload and compiler. - 1: only for 32-bit processes - 2: only for 64-bit processes + 32: only for 32-bit processes + 64: only for 64-bit processes on: enable for both 32- and 64-bit processes off: disable for both 32- and 64-bit processes - amd_iommu= [HW,X86-84] + amd_iommu= [HW,X86-64] Pass parameters to the AMD IOMMU driver in the system. Possible values are: fullflush - enable flushing of IO/TLB entries when diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index f049a1ca186f..589f2da5d545 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -282,11 +282,11 @@ tcp_max_ssthresh - INTEGER Default: 0 (off) tcp_max_syn_backlog - INTEGER - Maximal number of remembered connection requests, which are - still did not receive an acknowledgment from connecting client. - Default value is 1024 for systems with more than 128Mb of memory, - and 128 for low memory machines. If server suffers of overload, - try to increase this number. + Maximal number of remembered connection requests, which have not + received an acknowledgment from connecting client. + The minimal value is 128 for low memory machines, and it will + increase in proportion to the memory of machine. + If server suffers from overload, try increasing this number. tcp_max_tw_buckets - INTEGER Maximal number of timewait sockets held by system simultaneously. diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt index 3e2ec9cbf397..d50c14df3411 100644 --- a/Documentation/sound/alsa/soc/machine.txt +++ b/Documentation/sound/alsa/soc/machine.txt @@ -50,8 +50,7 @@ Machine DAI Configuration The machine DAI configuration glues all the codec and CPU DAIs together. It can also be used to set up the DAI system clock and for any machine related DAI initialisation e.g. the machine audio map can be connected to the codec audio -map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c -for examples. +map, unconnected codec pins can be set as such. struct snd_soc_dai_link is used to set up each DAI in your machine. e.g. @@ -83,8 +82,7 @@ Machine Power Map The machine driver can optionally extend the codec power map and to become an audio power map of the audio subsystem. This allows for automatic power up/down of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack -sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for -details. +sockets in the machine init function. 
Machine Controls diff --git a/Documentation/usb/linux-cdc-acm.inf b/Documentation/usb/linux-cdc-acm.inf index 37a02ce54841..f0ffc27d4c0a 100644 --- a/Documentation/usb/linux-cdc-acm.inf +++ b/Documentation/usb/linux-cdc-acm.inf @@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys [SourceDisksFiles] [SourceDisksNames] [DeviceList] -%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02 +%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00 [DeviceList.NTamd64] -%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02 +%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00 ;------------------------------------------------------------------------------ diff --git a/MAINTAINERS b/MAINTAINERS index 447560284996..b9db108f01c8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -511,8 +511,8 @@ M: Joerg Roedel <joerg.roedel@amd.com> L: iommu@lists.linux-foundation.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git S: Supported -F: arch/x86/kernel/amd_iommu*.c -F: arch/x86/include/asm/amd_iommu*.h +F: drivers/iommu/amd_iommu*.[ch] +F: include/linux/amd-iommu.h AMD MICROCODE UPDATE SUPPORT M: Andreas Herrmann <andreas.herrmann3@amd.com> @@ -1054,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES M: Ben Dooks <ben-linux@fluff.org> M: Kukjin Kim <kgene.kim@samsung.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) W: http://www.fluff.org/ben/linux/ S: Maintained F: arch/arm/plat-samsung/ F: arch/arm/plat-s3c24xx/ F: arch/arm/plat-s5p/ +F: arch/arm/mach-s3c24*/ +F: arch/arm/mach-s3c64xx/ F: drivers/*/*s3c2410* F: drivers/*/*/*s3c2410* - -ARM/S3C2410 ARM ARCHITECTURE -M: Ben Dooks <ben-linux@fluff.org> -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.fluff.org/ben/linux/ -S: Maintained -F: arch/arm/mach-s3c2410/ - -ARM/S3C244x ARM ARCHITECTURE -M: Ben Dooks <ben-linux@fluff.org> -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.fluff.org/ben/linux/ -S: Maintained -F: arch/arm/mach-s3c2440/ -F: arch/arm/mach-s3c2443/ - -ARM/S3C64xx ARM ARCHITECTURE -M: Ben Dooks <ben-linux@fluff.org> -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.fluff.org/ben/linux/ -S: Maintained -F: arch/arm/mach-s3c64xx/ +F: drivers/spi/spi-s3c* +F: sound/soc/samsung/* ARM/S5P EXYNOS ARM ARCHITECTURES M: Kukjin Kim <kgene.kim@samsung.com> @@ -4319,8 +4302,9 @@ F: include/linux/mm.h F: mm/ MEMORY RESOURCE CONTROLLER +M: Johannes Weiner <hannes@cmpxchg.org> +M: Michal Hocko <mhocko@suse.cz> M: Balbir Singh <bsingharora@gmail.com> -M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> L: cgroups@vger.kernel.org L: linux-mm@kvack.org @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc5 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig index a7e777581378..945a34f2a34d 100644 --- a/arch/arm/configs/omap1_defconfig +++ b/arch/arm/configs/omap1_defconfig @@ -48,12 +48,7 @@ CONFIG_MACH_SX1=y CONFIG_MACH_NOKIA770=y CONFIG_MACH_AMS_DELTA=y CONFIG_MACH_OMAP_GENERIC=y -CONFIG_OMAP_ARM_216MHZ=y -CONFIG_OMAP_ARM_195MHZ=y -CONFIG_OMAP_ARM_192MHZ=y CONFIG_OMAP_ARM_182MHZ=y -CONFIG_OMAP_ARM_168MHZ=y 
-# CONFIG_OMAP_ARM_60MHZ is not set # CONFIG_ARM_THUMB is not set CONFIG_PCCARD=y CONFIG_OMAP_CF=y diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index c475379199b1..8e9c98edc068 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -353,15 +353,15 @@ validate_group(struct perf_event *event) fake_pmu.used_mask = fake_used_mask; if (!validate_event(&fake_pmu, leader)) - return -ENOSPC; + return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { if (!validate_event(&fake_pmu, sibling)) - return -ENOSPC; + return -EINVAL; } if (!validate_event(&fake_pmu, event)) - return -ENOSPC; + return -EINVAL; return 0; } diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index 5610f14e342e..55d22911d0c2 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {} * USB Device (Gadget) * -------------------------------------------------------------------- */ -#ifdef CONFIG_USB_GADGET_AT91 +#ifdef CONFIG_USB_AT91 static struct at91_udc_data udc_data; static struct resource udc_resources[] = { diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c index 249ed1f5912d..1c945e3ba57c 100644 --- a/arch/arm/mach-at91/at91sam9260.c +++ b/arch/arm/mach-at91/at91sam9260.c @@ -197,9 +197,9 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk), - CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk), - CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk), - CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk), + CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk), + CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk), + CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk), CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk), /* more usart lookup table for DT entries */ CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck), diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index ff75f7d4091b..b1a4812d9965 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {} * USB Device (Gadget) * -------------------------------------------------------------------- */ -#ifdef CONFIG_USB_GADGET_AT91 +#ifdef CONFIG_USB_AT91 static struct at91_udc_data udc_data; static struct resource udc_resources[] = { diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index ae78f4d03b73..a178b58b0b9c 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {} * USB Device (Gadget) * -------------------------------------------------------------------- */ -#ifdef CONFIG_USB_GADGET_AT91 +#ifdef CONFIG_USB_AT91 static struct at91_udc_data udc_data; static struct resource udc_resources[] = { diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index 68562ce2af94..183b5f17f55e 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c @@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {} * 
USB Device (Gadget) * -------------------------------------------------------------------- */ -#ifdef CONFIG_USB_GADGET_AT91 +#ifdef CONFIG_USB_AT91 static struct at91_udc_data udc_data; static struct resource udc_resources[] = { diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h index 8f4866045b41..ec164a4124c9 100644 --- a/arch/arm/mach-at91/include/mach/system_rev.h +++ b/arch/arm/mach-at91/include/mach/system_rev.h @@ -19,7 +19,7 @@ #define BOARD_HAVE_NAND_16BIT (1 << 31) static inline int board_have_nand_16bit(void) { - return system_rev & BOARD_HAVE_NAND_16BIT; + return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0; } #endif /* __ARCH_SYSTEM_REV_H__ */ diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 1d7d24995226..6659a90dbcad 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = { .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction), .tdm_slots = 2, .serial_dir = da850_iis_serializer_direction, - .asp_chan_q = EVENTQ_1, + .asp_chan_q = EVENTQ_0, .version = MCASP_VERSION_2, .txnumevt = 1, .rxnumevt = 1, diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index 1918ae711428..46e1f4173b97 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = { /* UBL (a few copies) plus U-Boot */ .name = "bootloader", .offset = 0, - .size = 28 * NAND_BLOCK_SIZE, + .size = 30 * NAND_BLOCK_SIZE, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { /* U-Boot environment */ diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index e574d7f837a8..635bf7740157 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode) int val; u32 value; - if (!vpif_vsclkdis_reg || !cpld_client) + if (!vpif_vidclkctl_reg || !cpld_client) return -ENXIO; val = i2c_smbus_read_byte(cpld_client); @@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode) return val; spin_lock_irqsave(&vpif_reg_lock, flags); - value = __raw_readl(vpif_vsclkdis_reg); + value = __raw_readl(vpif_vidclkctl_reg); if (mux_mode) { val &= VPIF_INPUT_TWO_CHANNEL; value |= VIDCH1CLK; @@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode) val |= VPIF_INPUT_ONE_CHANNEL; value &= ~VIDCH1CLK; } - __raw_writel(value, vpif_vsclkdis_reg); + __raw_writel(value, vpif_vidclkctl_reg); spin_unlock_irqrestore(&vpif_reg_lock, flags); err = i2c_smbus_write_byte(cpld_client, val); diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 0b68ed534f8e..af27c130595f 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c @@ -161,7 +161,6 @@ static struct clk dsp_clk = { .name = "dsp", .parent = &pll1_sysclk1, .lpsc = DM646X_LPSC_C64X_CPU, - .flags = PSC_DSP, .usecount = 1, /* REVISIT how to disable? 
*/ }; diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h index fa59c097223d..8bc3fc256171 100644 --- a/arch/arm/mach-davinci/include/mach/psc.h +++ b/arch/arm/mach-davinci/include/mach/psc.h @@ -233,7 +233,7 @@ #define PTCMD 0x120 #define PTSTAT 0x128 #define PDSTAT 0x200 -#define PDCTL1 0x304 +#define PDCTL 0x300 #define MDSTAT 0x800 #define MDCTL 0xA00 @@ -244,7 +244,10 @@ #define PSC_STATE_ENABLE 3 #define MDSTAT_STATE_MASK 0x3f +#define PDSTAT_STATE_MASK 0x1f #define MDCTL_FORCE BIT(31) +#define PDCTL_NEXT BIT(1) +#define PDCTL_EPCGOOD BIT(8) #ifndef __ASSEMBLER__ diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c index 1fb6bdff38c1..d7e210f4b55c 100644 --- a/arch/arm/mach-davinci/psc.c +++ b/arch/arm/mach-davinci/psc.c @@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id) void davinci_psc_config(unsigned int domain, unsigned int ctlr, unsigned int id, bool enable, u32 flags) { - u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl; + u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl; void __iomem *psc_base; struct davinci_soc_info *soc_info = &davinci_soc_info; u32 next_state = PSC_STATE_ENABLE; @@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr, mdctl |= MDCTL_FORCE; __raw_writel(mdctl, psc_base + MDCTL + 4 * id); - pdstat = __raw_readl(psc_base + PDSTAT); - if ((pdstat & 0x00000001) == 0) { - pdctl1 = __raw_readl(psc_base + PDCTL1); - pdctl1 |= 0x1; - __raw_writel(pdctl1, psc_base + PDCTL1); + pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain); + if ((pdstat & PDSTAT_STATE_MASK) == 0) { + pdctl = __raw_readl(psc_base + PDCTL + 4 * domain); + pdctl |= PDCTL_NEXT; + __raw_writel(pdctl, psc_base + PDCTL + 4 * domain); ptcmd = 1 << domain; __raw_writel(ptcmd, psc_base + PTCMD); @@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr, epcpr = __raw_readl(psc_base + EPCPR); } while ((((epcpr >> domain) & 1) == 0)); - pdctl1 = __raw_readl(psc_base + PDCTL1); - pdctl1 |= 0x100; - __raw_writel(pdctl1, psc_base + PDCTL1); + pdctl = __raw_readl(psc_base + PDCTL + 4 * domain); + pdctl |= PDCTL_EPCGOOD; + __raw_writel(pdctl, psc_base + PDCTL + 4 * domain); } else { ptcmd = 1 << domain; __raw_writel(ptcmd, psc_base + PTCMD); diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 9cd860a27af5..8deb012189b5 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c @@ -37,14 +37,15 @@ static void __init imx6q_map_io(void) imx6q_clock_map_io(); } -static void __init imx6q_gpio_add_irq_domain(struct device_node *np, +static int __init imx6q_gpio_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { - static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS - - 32 * 7; /* imx6q gets 7 gpio ports */ + static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; + gpio_irq_base -= 32; irq_domain_add_simple(np, gpio_irq_base); - gpio_irq_base += 32; + + return 0; } static const struct of_device_id imx6q_irq_match[] __initconst = { diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c index 24030d0da6e3..0fb7a17df398 100644 --- a/arch/arm/mach-msm/devices-iommu.c +++ b/arch/arm/mach-msm/devices-iommu.c @@ -18,6 +18,7 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/bootmem.h> +#include <linux/module.h> #include <mach/irqs.h> #include <mach/iommu.h> diff --git a/arch/arm/mach-mx5/imx51-dt.c 
b/arch/arm/mach-mx5/imx51-dt.c index ccc61585659b..596edd967dbf 100644 --- a/arch/arm/mach-mx5/imx51-dt.c +++ b/arch/arm/mach-mx5/imx51-dt.c @@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = { { /* sentinel */ } }; -static void __init imx51_tzic_add_irq_domain(struct device_node *np, +static int __init imx51_tzic_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { irq_domain_add_simple(np, 0); + return 0; } -static void __init imx51_gpio_add_irq_domain(struct device_node *np, +static int __init imx51_gpio_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { - static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS - - 32 * 4; /* imx51 gets 4 gpio ports */ + static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; + gpio_irq_base -= 32; irq_domain_add_simple(np, gpio_irq_base); - gpio_irq_base += 32; + + return 0; } static const struct of_device_id imx51_irq_match[] __initconst = { diff --git a/arch/arm/mach-mx5/imx53-dt.c b/arch/arm/mach-mx5/imx53-dt.c index ccaa0b81b768..85bfd5ff21b0 100644 --- a/arch/arm/mach-mx5/imx53-dt.c +++ b/arch/arm/mach-mx5/imx53-dt.c @@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = { { /* sentinel */ } }; -static void __init imx53_tzic_add_irq_domain(struct device_node *np, +static int __init imx53_tzic_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { irq_domain_add_simple(np, 0); + return 0; } -static void __init imx53_gpio_add_irq_domain(struct device_node *np, +static int __init imx53_gpio_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { - static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS - - 32 * 7; /* imx53 gets 7 gpio ports */ + static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; + gpio_irq_base -= 32; irq_domain_add_simple(np, gpio_irq_base); - gpio_irq_base += 32; + + return 0; } static const struct of_device_id imx53_irq_match[] __initconst = { diff --git a/arch/arm/mach-mxs/include/mach/mx28.h b/arch/arm/mach-mxs/include/mach/mx28.h index 75d86118b76a..30c7990f3c01 100644 --- a/arch/arm/mach-mxs/include/mach/mx28.h +++ b/arch/arm/mach-mxs/include/mach/mx28.h @@ -104,8 +104,8 @@ #define MX28_INT_CAN1 9 #define MX28_INT_LRADC_TOUCH 10 #define MX28_INT_HSADC 13 -#define MX28_INT_IRADC_THRESH0 14 -#define MX28_INT_IRADC_THRESH1 15 +#define MX28_INT_LRADC_THRESH0 14 +#define MX28_INT_LRADC_THRESH1 15 #define MX28_INT_LRADC_CH0 16 #define MX28_INT_LRADC_CH1 17 #define MX28_INT_LRADC_CH2 18 diff --git a/arch/arm/mach-mxs/include/mach/mxs.h b/arch/arm/mach-mxs/include/mach/mxs.h index 0d2d2b470998..bde5f6634747 100644 --- a/arch/arm/mach-mxs/include/mach/mxs.h +++ b/arch/arm/mach-mxs/include/mach/mxs.h @@ -30,6 +30,7 @@ */ #define cpu_is_mx23() ( \ machine_is_mx23evk() || \ + machine_is_stmp378x() || \ 0) #define cpu_is_mx28() ( \ machine_is_mx28evk() || \ diff --git a/arch/arm/mach-mxs/mach-m28evk.c b/arch/arm/mach-mxs/mach-m28evk.c index 3b1681e4f49a..6b00577b7025 100644 --- a/arch/arm/mach-mxs/mach-m28evk.c +++ b/arch/arm/mach-mxs/mach-m28evk.c @@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = { MACHINE_START(M28EVK, "DENX M28 EVK") .map_io = mx28_map_io, .init_irq = mx28_init_irq, - .init_machine = m28evk_init, .timer = &m28evk_timer, + .init_machine = m28evk_init, MACHINE_END diff --git a/arch/arm/mach-mxs/mach-stmp378x_devb.c b/arch/arm/mach-mxs/mach-stmp378x_devb.c index 177e53123a02..6834dea38c04 100644 --- 
a/arch/arm/mach-mxs/mach-stmp378x_devb.c +++ b/arch/arm/mach-mxs/mach-stmp378x_devb.c @@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = { MACHINE_START(STMP378X, "STMP378X") .map_io = mx23_map_io, .init_irq = mx23_init_irq, - .init_machine = stmp378x_dvb_init, .timer = &stmp378x_dvb_timer, + .init_machine = stmp378x_dvb_init, MACHINE_END diff --git a/arch/arm/mach-mxs/module-tx28.c b/arch/arm/mach-mxs/module-tx28.c index 0fcff47009cf..9a7b08b2a925 100644 --- a/arch/arm/mach-mxs/module-tx28.c +++ b/arch/arm/mach-mxs/module-tx28.c @@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = { MX28_PAD_ENET0_CRS__ENET1_RX_EN, }; -static struct fec_platform_data tx28_fec0_data = { +static const struct fec_platform_data tx28_fec0_data __initconst = { .phy = PHY_INTERFACE_MODE_RMII, }; -static struct fec_platform_data tx28_fec1_data = { +static const struct fec_platform_data tx28_fec1_data __initconst = { .phy = PHY_INTERFACE_MODE_RMII, }; diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index 1297bb58869c..9ff90a744a21 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c @@ -16,6 +16,8 @@ #include <linux/kernel.h> #include <linux/clk.h> +#include <linux/cpufreq.h> +#include <linux/delay.h> #include <linux/io.h> #include <asm/mach-types.h> /* for machine_is_* */ @@ -927,16 +929,22 @@ int __init omap1_clk_init(void) void __init omap1_clk_late_init(void) { - if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE) + unsigned long rate = ck_dpll1.rate; + + if (rate >= OMAP1_DPLL1_SANE_VALUE) return; + /* System booting at unusable rate, force reprogramming of DPLL1 */ + ck_dpll1_p->rate = 0; + /* Find the highest supported frequency and enable it */ if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) { pr_err("System frequencies not set, using default. Check your config.\n"); omap_writew(0x2290, DPLL_CTL); - omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL); + omap_writew(cpu_is_omap7xx() ? 
0x2005 : 0x0005, ARM_CKCTL); ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE; } propagate_rate(&ck_dpll1); omap1_show_rates(); + loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate); } diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index ba1aa07bdb29..c15c5c9c9085 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = { static void __init rx51_charger_init(void) { WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO, - GPIOF_OUT_INIT_LOW, "isp1704_reset")); + GPIOF_OUT_INIT_HIGH, "isp1704_reset")); platform_device_register(&rx51_charger_device); } diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c index 5d0064a4fb5a..4e1b1a2f0537 100644 --- a/arch/arm/mach-omap2/clock3xxx_data.c +++ b/arch/arm/mach-omap2/clock3xxx_data.c @@ -2480,6 +2480,16 @@ static struct clk uart4_fck = { .recalc = &followparent_recalc, }; +static struct clk uart4_fck_am35xx = { + .name = "uart4_fck", + .ops = &clkops_omap2_dflt_wait, + .parent = &per_48m_fck, + .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), + .enable_bit = OMAP3430_EN_UART4_SHIFT, + .clkdm_name = "core_l4_clkdm", + .recalc = &followparent_recalc, +}; + static struct clk gpt2_fck = { .name = "gpt2_fck", .ops = &clkops_omap2_dflt_wait, @@ -3403,6 +3413,7 @@ static struct omap_clk omap3xxx_clks[] = { CLK(NULL, "per_48m_fck", &per_48m_fck, CK_3XXX), CLK(NULL, "uart3_fck", &uart3_fck, CK_3XXX), CLK(NULL, "uart4_fck", &uart4_fck, CK_36XX), + CLK(NULL, "uart4_fck", &uart4_fck_am35xx, CK_3505 | CK_3517), CLK(NULL, "gpt2_fck", &gpt2_fck, CK_3XXX), CLK(NULL, "gpt3_fck", &gpt3_fck, CK_3XXX), CLK(NULL, "gpt4_fck", &gpt4_fck, CK_3XXX), diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c index 292eee3be15f..28fcb27005d2 100644 --- a/arch/arm/mach-omap2/mcbsp.c +++ b/arch/arm/mach-omap2/mcbsp.c @@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused) pdata->reg_size = 4; pdata->has_ccr = true; } + pdata->set_clk_src = omap2_mcbsp_set_clk_src; + if (id == 1) + pdata->mux_signal = omap2_mcbsp1_mux_rx_clk; if (oh->class->rev == MCBSP_CONFIG_TYPE3) { if (id == 2) @@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused) name, oh->name); return PTR_ERR(pdev); } - pdata->set_clk_src = omap2_mcbsp_set_clk_src; - if (id == 1) - pdata->mux_signal = omap2_mcbsp1_mux_rx_clk; omap_mcbsp_count++; return 0; } diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 7f8915ad5099..5324e8d93bc0 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -84,6 +84,8 @@ static struct omap_hwmod omap3xxx_mcbsp4_hwmod; static struct omap_hwmod omap3xxx_mcbsp5_hwmod; static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod; static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod; +static struct omap_hwmod omap3xxx_usb_host_hs_hwmod; +static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod; /* L3 -> L4_CORE interface */ static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = { @@ -164,6 +166,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod; static struct omap_hwmod omap3xxx_uart2_hwmod; static struct omap_hwmod omap3xxx_uart3_hwmod; static struct omap_hwmod omap3xxx_uart4_hwmod; +static struct omap_hwmod am35xx_uart4_hwmod; static struct omap_hwmod omap3xxx_usbhsotg_hwmod; /* l3_core -> 
usbhsotg interface */ @@ -299,6 +302,23 @@ static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; +/* AM35xx: L4 CORE -> UART4 interface */ +static struct omap_hwmod_addr_space am35xx_uart4_addr_space[] = { + { + .pa_start = OMAP3_UART4_AM35XX_BASE, + .pa_end = OMAP3_UART4_AM35XX_BASE + SZ_1K - 1, + .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT, + }, +}; + +static struct omap_hwmod_ocp_if am35xx_l4_core__uart4 = { + .master = &omap3xxx_l4_core_hwmod, + .slave = &am35xx_uart4_hwmod, + .clk = "uart4_ick", + .addr = am35xx_uart4_addr_space, + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + /* L4 CORE -> I2C1 interface */ static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = { .master = &omap3xxx_l4_core_hwmod, @@ -1162,6 +1182,7 @@ static struct omap_hwmod_class_sysconfig i2c_sysc = { SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .clockact = CLOCKACT_TEST_ICLK, .sysc_fields = &omap_hwmod_sysc_type1, }; @@ -1309,6 +1330,39 @@ static struct omap_hwmod omap3xxx_uart4_hwmod = { .class = &omap2_uart_class, }; +static struct omap_hwmod_irq_info am35xx_uart4_mpu_irqs[] = { + { .irq = INT_35XX_UART4_IRQ, }, +}; + +static struct omap_hwmod_dma_info am35xx_uart4_sdma_reqs[] = { + { .name = "rx", .dma_req = AM35XX_DMA_UART4_RX, }, + { .name = "tx", .dma_req = AM35XX_DMA_UART4_TX, }, +}; + +static struct omap_hwmod_ocp_if *am35xx_uart4_slaves[] = { + &am35xx_l4_core__uart4, +}; + +static struct omap_hwmod am35xx_uart4_hwmod = { + .name = "uart4", + .mpu_irqs = am35xx_uart4_mpu_irqs, + .sdma_reqs = am35xx_uart4_sdma_reqs, + .main_clk = "uart4_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430_EN_UART4_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430_EN_UART4_SHIFT, + }, + }, + .slaves = am35xx_uart4_slaves, + .slaves_cnt = ARRAY_SIZE(am35xx_uart4_slaves), + .class = &omap2_uart_class, +}; + + static struct omap_hwmod_class i2c_class = { .name = "i2c", .sysc = &i2c_sysc, @@ -1636,7 +1690,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c1_slaves[] = { static struct omap_hwmod omap3xxx_i2c1_hwmod = { .name = "i2c1", - .flags = HWMOD_16BIT_REG, + .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap2_i2c1_mpu_irqs, .sdma_reqs = omap2_i2c1_sdma_reqs, .main_clk = "i2c1_fck", @@ -1670,7 +1724,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c2_slaves[] = { static struct omap_hwmod omap3xxx_i2c2_hwmod = { .name = "i2c2", - .flags = HWMOD_16BIT_REG, + .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap2_i2c2_mpu_irqs, .sdma_reqs = omap2_i2c2_sdma_reqs, .main_clk = "i2c2_fck", @@ -1715,7 +1769,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = { static struct omap_hwmod omap3xxx_i2c3_hwmod = { .name = "i2c3", - .flags = HWMOD_16BIT_REG, + .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = i2c3_mpu_irqs, .sdma_reqs = i2c3_sdma_reqs, .main_clk = "i2c3_fck", @@ -3072,7 +3126,35 @@ static struct omap_mmc_dev_attr mmc1_dev_attr = { .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT, }; -static struct omap_hwmod omap3xxx_mmc1_hwmod = { +/* See 35xx errata 2.1.1.128 in SPRZ278F */ +static struct omap_mmc_dev_attr mmc1_pre_es3_dev_attr = { + .flags = (OMAP_HSMMC_SUPPORTS_DUAL_VOLT | + OMAP_HSMMC_BROKEN_MULTIBLOCK_READ), +}; + +static struct omap_hwmod omap3xxx_pre_es3_mmc1_hwmod = { + .name = "mmc1", + .mpu_irqs = omap34xx_mmc1_mpu_irqs, + .sdma_reqs = omap34xx_mmc1_sdma_reqs, + 
.opt_clks = omap34xx_mmc1_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks), + .main_clk = "mmchs1_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430_EN_MMC1_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430_ST_MMC1_SHIFT, + }, + }, + .dev_attr = &mmc1_pre_es3_dev_attr, + .slaves = omap3xxx_mmc1_slaves, + .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc1_slaves), + .class = &omap34xx_mmc_class, +}; + +static struct omap_hwmod omap3xxx_es3plus_mmc1_hwmod = { .name = "mmc1", .mpu_irqs = omap34xx_mmc1_mpu_irqs, .sdma_reqs = omap34xx_mmc1_sdma_reqs, @@ -3115,7 +3197,34 @@ static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = { &omap3xxx_l4_core__mmc2, }; -static struct omap_hwmod omap3xxx_mmc2_hwmod = { +/* See 35xx errata 2.1.1.128 in SPRZ278F */ +static struct omap_mmc_dev_attr mmc2_pre_es3_dev_attr = { + .flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ, +}; + +static struct omap_hwmod omap3xxx_pre_es3_mmc2_hwmod = { + .name = "mmc2", + .mpu_irqs = omap34xx_mmc2_mpu_irqs, + .sdma_reqs = omap34xx_mmc2_sdma_reqs, + .opt_clks = omap34xx_mmc2_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks), + .main_clk = "mmchs2_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430_EN_MMC2_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430_ST_MMC2_SHIFT, + }, + }, + .dev_attr = &mmc2_pre_es3_dev_attr, + .slaves = omap3xxx_mmc2_slaves, + .slaves_cnt = ARRAY_SIZE(omap3xxx_mmc2_slaves), + .class = &omap34xx_mmc_class, +}; + +static struct omap_hwmod omap3xxx_es3plus_mmc2_hwmod = { .name = "mmc2", .mpu_irqs = omap34xx_mmc2_mpu_irqs, .sdma_reqs = omap34xx_mmc2_sdma_reqs, @@ -3177,13 +3286,223 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = { .class = &omap34xx_mmc_class, }; +/* + * 'usb_host_hs' class + * high-speed multi-port usb host controller + */ +static struct omap_hwmod_ocp_if omap3xxx_usb_host_hs__l3_main_2 = { + .master = &omap3xxx_usb_host_hs_hwmod, + .slave = &omap3xxx_l3_main_hwmod, + .clk = "core_l3_ick", + .user = OCP_USER_MPU, +}; + +static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY | + SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | + SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class omap3xxx_usb_host_hs_hwmod_class = { + .name = "usb_host_hs", + .sysc = &omap3xxx_usb_host_hs_sysc, +}; + +static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_masters[] = { + &omap3xxx_usb_host_hs__l3_main_2, +}; + +static struct omap_hwmod_addr_space omap3xxx_usb_host_hs_addrs[] = { + { + .name = "uhh", + .pa_start = 0x48064000, + .pa_end = 0x480643ff, + .flags = ADDR_TYPE_RT + }, + { + .name = "ohci", + .pa_start = 0x48064400, + .pa_end = 0x480647ff, + }, + { + .name = "ehci", + .pa_start = 0x48064800, + .pa_end = 0x48064cff, + }, + {} +}; + +static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_host_hs = { + .master = &omap3xxx_l4_core_hwmod, + .slave = &omap3xxx_usb_host_hs_hwmod, + .clk = "usbhost_ick", + .addr = omap3xxx_usb_host_hs_addrs, + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +static struct omap_hwmod_ocp_if *omap3xxx_usb_host_hs_slaves[] = { + &omap3xxx_l4_core__usb_host_hs, +}; + +static struct omap_hwmod_opt_clk omap3xxx_usb_host_hs_opt_clks[] = { + { .role 
= "ehci_logic_fck", .clk = "usbhost_120m_fck", }, +}; + +static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = { + { .name = "ohci-irq", .irq = 76 }, + { .name = "ehci-irq", .irq = 77 }, + { .irq = -1 } +}; + +static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = { + .name = "usb_host_hs", + .class = &omap3xxx_usb_host_hs_hwmod_class, + .clkdm_name = "l3_init_clkdm", + .mpu_irqs = omap3xxx_usb_host_hs_irqs, + .main_clk = "usbhost_48m_fck", + .prcm = { + .omap2 = { + .module_offs = OMAP3430ES2_USBHOST_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430ES2_EN_USBHOST1_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430ES2_ST_USBHOST_IDLE_SHIFT, + .idlest_stdby_bit = OMAP3430ES2_ST_USBHOST_STDBY_SHIFT, + }, + }, + .opt_clks = omap3xxx_usb_host_hs_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_opt_clks), + .slaves = omap3xxx_usb_host_hs_slaves, + .slaves_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_slaves), + .masters = omap3xxx_usb_host_hs_masters, + .masters_cnt = ARRAY_SIZE(omap3xxx_usb_host_hs_masters), + + /* + * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock + * id: i660 + * + * Description: + * In the following configuration : + * - USBHOST module is set to smart-idle mode + * - PRCM asserts idle_req to the USBHOST module ( This typically + * happens when the system is going to a low power mode : all ports + * have been suspended, the master part of the USBHOST module has + * entered the standby state, and SW has cut the functional clocks) + * - an USBHOST interrupt occurs before the module is able to answer + * idle_ack, typically a remote wakeup IRQ. + * Then the USB HOST module will enter a deadlock situation where it + * is no more accessible nor functional. + * + * Workaround: + * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE + */ + + /* + * Errata: USB host EHCI may stall when entering smart-standby mode + * Id: i571 + * + * Description: + * When the USBHOST module is set to smart-standby mode, and when it is + * ready to enter the standby state (i.e. all ports are suspended and + * all attached devices are in suspend mode), then it can wrongly assert + * the Mstandby signal too early while there are still some residual OCP + * transactions ongoing. If this condition occurs, the internal state + * machine may go to an undefined state and the USB link may be stuck + * upon the next resume. + * + * Workaround: + * Don't use smart standby; use only force standby, + * hence HWMOD_SWSUP_MSTANDBY + */ + + /* + * During system boot; If the hwmod framework resets the module + * the module will have smart idle settings; which can lead to deadlock + * (above Errata Id:i660); so, dont reset the module during boot; + * Use HWMOD_INIT_NO_RESET. 
+ */ + + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | + HWMOD_INIT_NO_RESET, +}; + +/* + * 'usb_tll_hs' class + * usb_tll_hs module is the adapter on the usb_host_hs ports + */ +static struct omap_hwmod_class_sysconfig omap3xxx_usb_tll_hs_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | + SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | + SYSC_HAS_AUTOIDLE), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class omap3xxx_usb_tll_hs_hwmod_class = { + .name = "usb_tll_hs", + .sysc = &omap3xxx_usb_tll_hs_sysc, +}; + +static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = { + { .name = "tll-irq", .irq = 78 }, + { .irq = -1 } +}; + +static struct omap_hwmod_addr_space omap3xxx_usb_tll_hs_addrs[] = { + { + .name = "tll", + .pa_start = 0x48062000, + .pa_end = 0x48062fff, + .flags = ADDR_TYPE_RT + }, + {} +}; + +static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_tll_hs = { + .master = &omap3xxx_l4_core_hwmod, + .slave = &omap3xxx_usb_tll_hs_hwmod, + .clk = "usbtll_ick", + .addr = omap3xxx_usb_tll_hs_addrs, + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +static struct omap_hwmod_ocp_if *omap3xxx_usb_tll_hs_slaves[] = { + &omap3xxx_l4_core__usb_tll_hs, +}; + +static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = { + .name = "usb_tll_hs", + .class = &omap3xxx_usb_tll_hs_hwmod_class, + .clkdm_name = "l3_init_clkdm", + .mpu_irqs = omap3xxx_usb_tll_hs_irqs, + .main_clk = "usbtll_fck", + .prcm = { + .omap2 = { + .module_offs = CORE_MOD, + .prcm_reg_id = 3, + .module_bit = OMAP3430ES2_EN_USBTLL_SHIFT, + .idlest_reg_id = 3, + .idlest_idle_bit = OMAP3430ES2_ST_USBTLL_SHIFT, + }, + }, + .slaves = omap3xxx_usb_tll_hs_slaves, + .slaves_cnt = ARRAY_SIZE(omap3xxx_usb_tll_hs_slaves), +}; + static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { &omap3xxx_l3_main_hwmod, &omap3xxx_l4_core_hwmod, &omap3xxx_l4_per_hwmod, &omap3xxx_l4_wkup_hwmod, - &omap3xxx_mmc1_hwmod, - &omap3xxx_mmc2_hwmod, &omap3xxx_mmc3_hwmod, &omap3xxx_mpu_hwmod, @@ -3198,12 +3517,12 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { &omap3xxx_timer9_hwmod, &omap3xxx_timer10_hwmod, &omap3xxx_timer11_hwmod, - &omap3xxx_timer12_hwmod, &omap3xxx_wd_timer2_hwmod, &omap3xxx_uart1_hwmod, &omap3xxx_uart2_hwmod, &omap3xxx_uart3_hwmod, + /* dss class */ &omap3xxx_dss_dispc_hwmod, &omap3xxx_dss_dsi1_hwmod, @@ -3245,20 +3564,38 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = { NULL, }; +/* GP-only hwmods */ +static __initdata struct omap_hwmod *omap3xxx_gp_hwmods[] = { + &omap3xxx_timer12_hwmod, + NULL +}; + /* 3430ES1-only hwmods */ static __initdata struct omap_hwmod *omap3430es1_hwmods[] = { - &omap3xxx_iva_hwmod, &omap3430es1_dss_core_hwmod, - &omap3xxx_mailbox_hwmod, NULL }; /* 3430ES2+-only hwmods */ static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = { - &omap3xxx_iva_hwmod, &omap3xxx_dss_core_hwmod, &omap3xxx_usbhsotg_hwmod, - &omap3xxx_mailbox_hwmod, + &omap3xxx_usb_host_hs_hwmod, + &omap3xxx_usb_tll_hs_hwmod, + NULL +}; + +/* <= 3430ES3-only hwmods */ +static struct omap_hwmod *omap3430_pre_es3_hwmods[] __initdata = { + &omap3xxx_pre_es3_mmc1_hwmod, + &omap3xxx_pre_es3_mmc2_hwmod, + NULL +}; + +/* 3430ES3+-only hwmods */ +static struct omap_hwmod *omap3430_es3plus_hwmods[] __initdata = { + &omap3xxx_es3plus_mmc1_hwmod, + &omap3xxx_es3plus_mmc2_hwmod, NULL }; @@ -3280,12 +3617,21 @@ static __initdata struct omap_hwmod 
*omap36xx_hwmods[] = { &omap36xx_sr2_hwmod, &omap3xxx_usbhsotg_hwmod, &omap3xxx_mailbox_hwmod, + &omap3xxx_usb_host_hs_hwmod, + &omap3xxx_usb_tll_hs_hwmod, + &omap3xxx_es3plus_mmc1_hwmod, + &omap3xxx_es3plus_mmc2_hwmod, NULL }; static __initdata struct omap_hwmod *am35xx_hwmods[] = { &omap3xxx_dss_core_hwmod, /* XXX ??? */ &am35xx_usbhsotg_hwmod, + &am35xx_uart4_hwmod, + &omap3xxx_usb_host_hs_hwmod, + &omap3xxx_usb_tll_hs_hwmod, + &omap3xxx_es3plus_mmc1_hwmod, + &omap3xxx_es3plus_mmc2_hwmod, NULL }; @@ -3300,6 +3646,13 @@ int __init omap3xxx_hwmod_init(void) if (r < 0) return r; + /* Register GP-only hwmods. */ + if (omap_type() == OMAP2_DEVICE_TYPE_GP) { + r = omap_hwmod_register(omap3xxx_gp_hwmods); + if (r < 0) + return r; + } + rev = omap_rev(); /* @@ -3338,6 +3691,21 @@ int __init omap3xxx_hwmod_init(void) h = omap3430es2plus_hwmods; }; + if (h) { + r = omap_hwmod_register(h); + if (r < 0) + return r; + } + + h = NULL; + if (rev == OMAP3430_REV_ES1_0 || rev == OMAP3430_REV_ES2_0 || + rev == OMAP3430_REV_ES2_1) { + h = omap3430_pre_es3_hwmods; + } else if (rev == OMAP3430_REV_ES3_0 || rev == OMAP3430_REV_ES3_1 || + rev == OMAP3430_REV_ES3_1_2) { + h = omap3430_es3plus_hwmods; + }; + if (h) r = omap_hwmod_register(h); diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index daaf165af696..f9f151081760 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -70,6 +70,8 @@ static struct omap_hwmod omap44xx_mmc2_hwmod; static struct omap_hwmod omap44xx_mpu_hwmod; static struct omap_hwmod omap44xx_mpu_private_hwmod; static struct omap_hwmod omap44xx_usb_otg_hs_hwmod; +static struct omap_hwmod omap44xx_usb_host_hs_hwmod; +static struct omap_hwmod omap44xx_usb_tll_hs_hwmod; /* * Interconnects omap_hwmod structures @@ -2246,6 +2248,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_i2c_sysc = { SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), + .clockact = CLOCKACT_TEST_ICLK, .sysc_fields = &omap_hwmod_sysc_type1, }; @@ -2300,7 +2303,7 @@ static struct omap_hwmod omap44xx_i2c1_hwmod = { .name = "i2c1", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_16BIT_REG, + .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c1_irqs, .sdma_reqs = omap44xx_i2c1_sdma_reqs, .main_clk = "i2c1_fck", @@ -2356,7 +2359,7 @@ static struct omap_hwmod omap44xx_i2c2_hwmod = { .name = "i2c2", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_16BIT_REG, + .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c2_irqs, .sdma_reqs = omap44xx_i2c2_sdma_reqs, .main_clk = "i2c2_fck", @@ -2412,7 +2415,7 @@ static struct omap_hwmod omap44xx_i2c3_hwmod = { .name = "i2c3", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_16BIT_REG, + .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c3_irqs, .sdma_reqs = omap44xx_i2c3_sdma_reqs, .main_clk = "i2c3_fck", @@ -2468,7 +2471,7 @@ static struct omap_hwmod omap44xx_i2c4_hwmod = { .name = "i2c4", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", - .flags = HWMOD_16BIT_REG, + .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c4_irqs, .sdma_reqs = omap44xx_i2c4_sdma_reqs, .main_clk = "i2c4_fck", @@ -5276,6 +5279,207 @@ static struct omap_hwmod omap44xx_wd_timer3_hwmod = { .slaves_cnt = 
ARRAY_SIZE(omap44xx_wd_timer3_slaves), }; +/* + * 'usb_host_hs' class + * high-speed multi-port usb host controller + */ +static struct omap_hwmod_ocp_if omap44xx_usb_host_hs__l3_main_2 = { + .master = &omap44xx_usb_host_hs_hwmod, + .slave = &omap44xx_l3_main_2_hwmod, + .clk = "l3_div_ck", + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | + SYSC_HAS_SOFTRESET), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | + MSTANDBY_SMART | MSTANDBY_SMART_WKUP), + .sysc_fields = &omap_hwmod_sysc_type2, +}; + +static struct omap_hwmod_class omap44xx_usb_host_hs_hwmod_class = { + .name = "usb_host_hs", + .sysc = &omap44xx_usb_host_hs_sysc, +}; + +static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_masters[] = { + &omap44xx_usb_host_hs__l3_main_2, +}; + +static struct omap_hwmod_addr_space omap44xx_usb_host_hs_addrs[] = { + { + .name = "uhh", + .pa_start = 0x4a064000, + .pa_end = 0x4a0647ff, + .flags = ADDR_TYPE_RT + }, + { + .name = "ohci", + .pa_start = 0x4a064800, + .pa_end = 0x4a064bff, + }, + { + .name = "ehci", + .pa_start = 0x4a064c00, + .pa_end = 0x4a064fff, + }, + {} +}; + +static struct omap_hwmod_irq_info omap44xx_usb_host_hs_irqs[] = { + { .name = "ohci-irq", .irq = 76 + OMAP44XX_IRQ_GIC_START }, + { .name = "ehci-irq", .irq = 77 + OMAP44XX_IRQ_GIC_START }, + { .irq = -1 } +}; + +static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_hs = { + .master = &omap44xx_l4_cfg_hwmod, + .slave = &omap44xx_usb_host_hs_hwmod, + .clk = "l4_div_ck", + .addr = omap44xx_usb_host_hs_addrs, + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_slaves[] = { + &omap44xx_l4_cfg__usb_host_hs, +}; + +static struct omap_hwmod omap44xx_usb_host_hs_hwmod = { + .name = "usb_host_hs", + .class = &omap44xx_usb_host_hs_hwmod_class, + .clkdm_name = "l3_init_clkdm", + .main_clk = "usb_host_hs_fck", + .prcm = { + .omap4 = { + .clkctrl_offs = OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET, + .context_offs = OMAP4_RM_L3INIT_USB_HOST_CONTEXT_OFFSET, + .modulemode = MODULEMODE_SWCTRL, + }, + }, + .mpu_irqs = omap44xx_usb_host_hs_irqs, + .slaves = omap44xx_usb_host_hs_slaves, + .slaves_cnt = ARRAY_SIZE(omap44xx_usb_host_hs_slaves), + .masters = omap44xx_usb_host_hs_masters, + .masters_cnt = ARRAY_SIZE(omap44xx_usb_host_hs_masters), + + /* + * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock + * id: i660 + * + * Description: + * In the following configuration : + * - USBHOST module is set to smart-idle mode + * - PRCM asserts idle_req to the USBHOST module ( This typically + * happens when the system is going to a low power mode : all ports + * have been suspended, the master part of the USBHOST module has + * entered the standby state, and SW has cut the functional clocks) + * - an USBHOST interrupt occurs before the module is able to answer + * idle_ack, typically a remote wakeup IRQ. + * Then the USB HOST module will enter a deadlock situation where it + * is no more accessible nor functional. + * + * Workaround: + * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE + */ + + /* + * Errata: USB host EHCI may stall when entering smart-standby mode + * Id: i571 + * + * Description: + * When the USBHOST module is set to smart-standby mode, and when it is + * ready to enter the standby state (i.e. 
all ports are suspended and + * all attached devices are in suspend mode), then it can wrongly assert + * the Mstandby signal too early while there are still some residual OCP + * transactions ongoing. If this condition occurs, the internal state + * machine may go to an undefined state and the USB link may be stuck + * upon the next resume. + * + * Workaround: + * Don't use smart standby; use only force standby, + * hence HWMOD_SWSUP_MSTANDBY + */ + + /* + * During system boot; If the hwmod framework resets the module + * the module will have smart idle settings; which can lead to deadlock + * (above Errata Id:i660); so, dont reset the module during boot; + * Use HWMOD_INIT_NO_RESET. + */ + + .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | + HWMOD_INIT_NO_RESET, +}; + +/* + * 'usb_tll_hs' class + * usb_tll_hs module is the adapter on the usb_host_hs ports + */ +static struct omap_hwmod_class_sysconfig omap44xx_usb_tll_hs_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | + SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | + SYSC_HAS_AUTOIDLE), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + +static struct omap_hwmod_class omap44xx_usb_tll_hs_hwmod_class = { + .name = "usb_tll_hs", + .sysc = &omap44xx_usb_tll_hs_sysc, +}; + +static struct omap_hwmod_irq_info omap44xx_usb_tll_hs_irqs[] = { + { .name = "tll-irq", .irq = 78 + OMAP44XX_IRQ_GIC_START }, + { .irq = -1 } +}; + +static struct omap_hwmod_addr_space omap44xx_usb_tll_hs_addrs[] = { + { + .name = "tll", + .pa_start = 0x4a062000, + .pa_end = 0x4a063fff, + .flags = ADDR_TYPE_RT + }, + {} +}; + +static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_tll_hs = { + .master = &omap44xx_l4_cfg_hwmod, + .slave = &omap44xx_usb_tll_hs_hwmod, + .clk = "l4_div_ck", + .addr = omap44xx_usb_tll_hs_addrs, + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + +static struct omap_hwmod_ocp_if *omap44xx_usb_tll_hs_slaves[] = { + &omap44xx_l4_cfg__usb_tll_hs, +}; + +static struct omap_hwmod omap44xx_usb_tll_hs_hwmod = { + .name = "usb_tll_hs", + .class = &omap44xx_usb_tll_hs_hwmod_class, + .clkdm_name = "l3_init_clkdm", + .main_clk = "usb_tll_hs_ick", + .prcm = { + .omap4 = { + .clkctrl_offs = OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET, + .context_offs = OMAP4_RM_L3INIT_USB_TLL_CONTEXT_OFFSET, + .modulemode = MODULEMODE_HWCTRL, + }, + }, + .mpu_irqs = omap44xx_usb_tll_hs_irqs, + .slaves = omap44xx_usb_tll_hs_slaves, + .slaves_cnt = ARRAY_SIZE(omap44xx_usb_tll_hs_slaves), +}; + static __initdata struct omap_hwmod *omap44xx_hwmods[] = { /* dmm class */ @@ -5415,13 +5619,16 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = { &omap44xx_uart3_hwmod, &omap44xx_uart4_hwmod, + /* usb host class */ + &omap44xx_usb_host_hs_hwmod, + &omap44xx_usb_tll_hs_hwmod, + /* usb_otg_hs class */ &omap44xx_usb_otg_hs_hwmod, /* wd_timer class */ &omap44xx_wd_timer2_hwmod, &omap44xx_wd_timer3_hwmod, - NULL, }; diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h index 0363dcb0ef93..da2d80f5fcbd 100644 --- a/arch/arm/mach-omap2/prcm-common.h +++ b/arch/arm/mach-omap2/prcm-common.h @@ -201,6 +201,8 @@ #define OMAP3430_EN_MMC2_SHIFT 25 #define OMAP3430_EN_MMC1_MASK (1 << 24) #define OMAP3430_EN_MMC1_SHIFT 24 +#define OMAP3430_EN_UART4_MASK (1 << 23) +#define OMAP3430_EN_UART4_SHIFT 23 #define OMAP3430_EN_MCSPI4_MASK (1 << 21) #define OMAP3430_EN_MCSPI4_SHIFT 21 #define OMAP3430_EN_MCSPI3_MASK (1 << 20) diff --git 
a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c index cb53160f6c5d..26ebb57719df 100644 --- a/arch/arm/mach-prima2/pm.c +++ b/arch/arm/mach-prima2/pm.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <linux/suspend.h> #include <linux/slab.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c index ef555c041962..a12b689a8702 100644 --- a/arch/arm/mach-prima2/prima2.c +++ b/arch/arm/mach-prima2/prima2.c @@ -8,6 +8,7 @@ #include <linux/init.h> #include <linux/kernel.h> +#include <asm/sizes.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <linux/of.h> diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c index 5e6b42089eb4..3341fd118723 100644 --- a/arch/arm/mach-s3c64xx/dev-spi.c +++ b/arch/arm/mach-s3c64xx/dev-spi.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/string.h> +#include <linux/export.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c index 7a3bc32df425..51c00f2453c6 100644 --- a/arch/arm/mach-s3c64xx/s3c6400.c +++ b/arch/arm/mach-s3c64xx/s3c6400.c @@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void) s3c64xx_init_irq(~0 & ~(0xf << 5), ~0); } -struct sysdev_class s3c6400_sysclass = { +static struct sysdev_class s3c6400_sysclass = { .name = "s3c6400-core", }; diff --git a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c index 83d2afb79e9f..2cf80026c58d 100644 --- a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c +++ b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c @@ -20,7 +20,7 @@ #include <plat/fb.h> #include <plat/gpio-cfg.h> -extern void s3c64xx_fb_gpio_setup_24bpp(void) +void s3c64xx_fb_gpio_setup_24bpp(void) { s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2)); s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2)); diff --git a/arch/arm/mach-sa1100/Makefile.boot b/arch/arm/mach-sa1100/Makefile.boot index 5a616f6e5612..f7951aa04562 100644 --- a/arch/arm/mach-sa1100/Makefile.boot +++ b/arch/arm/mach-sa1100/Makefile.boot @@ -1,5 +1,5 @@ -ifeq ($(CONFIG_ARCH_SA1100),y) - zreladdr-$(CONFIG_SA1111) += 0xc0208000 +ifeq ($(CONFIG_SA1111),y) + zreladdr-y += 0xc0208000 else zreladdr-y += 0xc0008000 endif diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h index 30e10719b774..6e5d3a0cb9d5 100644 --- a/arch/arm/plat-omap/include/plat/irqs.h +++ b/arch/arm/plat-omap/include/plat/irqs.h @@ -357,7 +357,7 @@ #define INT_35XX_EMAC_C0_TX_PULSE_IRQ 69 #define INT_35XX_EMAC_C0_MISC_PULSE_IRQ 70 #define INT_35XX_USBOTG_IRQ 71 -#define INT_35XX_UART4 84 +#define INT_35XX_UART4_IRQ 84 #define INT_35XX_CCDC_VD0_IRQ 88 #define INT_35XX_CCDC_VD1_IRQ 92 #define INT_35XX_CCDC_VD2_IRQ 93 diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h index 1ab9fd6abe6d..865a2ba7ffa8 100644 --- a/arch/arm/plat-omap/include/plat/serial.h +++ b/arch/arm/plat-omap/include/plat/serial.h @@ -44,6 +44,7 @@ #define OMAP3_UART2_BASE OMAP2_UART2_BASE #define OMAP3_UART3_BASE 0x49020000 #define OMAP3_UART4_BASE 0x49042000 /* Only on 36xx */ +#define OMAP3_UART4_AM35XX_BASE 0x4809E000 /* Only on AM35xx */ /* OMAP4 serial ports */ #define OMAP4_UART1_BASE OMAP2_UART1_BASE diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 43f984e93970..303192fc9260 100644 --- 
a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -350,10 +350,12 @@ #define __NR_clock_adjtime 342 #define __NR_syncfs 343 #define __NR_setns 344 +#define __NR_process_vm_readv 345 +#define __NR_process_vm_writev 346 #ifdef __KERNEL__ -#define NR_syscalls 345 +#define NR_syscalls 347 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index c468f2edaa85..ce827b376110 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -365,4 +365,6 @@ ENTRY(sys_call_table) .long sys_clock_adjtime .long sys_syncfs .long sys_setns + .long sys_process_vm_readv /* 345 */ + .long sys_process_vm_writev diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 4f2971bcf8e5..315fc0b250f8 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event) if (!atomic_inc_not_zero(&active_events)) { if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { atomic_dec(&active_events); - return -ENOSPC; + return -EINVAL; } mutex_lock(&pmu_reserve_mutex); @@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event) memset(&fake_cpuc, 0, sizeof(fake_cpuc)); if (!validate_event(&fake_cpuc, leader)) - return -ENOSPC; + return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { if (!validate_event(&fake_cpuc, sibling)) - return -ENOSPC; + return -EINVAL; } if (!validate_event(&fake_cpuc, event)) - return -ENOSPC; + return -EINVAL; return 0; } diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 524d23b8610c..4f289ff0b7fe 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -599,10 +599,10 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) skey = page_get_storage_key(address); bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); /* Clear page changed & referenced bit in the storage key */ - if (bits) { - skey ^= bits; - page_set_storage_key(address, skey, 1); - } + if (bits & _PAGE_CHANGED) + page_set_storage_key(address, skey ^ bits, 1); + else if (bits) + page_reset_referenced(address); /* Transfer page changed & referenced bit to guest bits in pgste */ pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */ /* Get host changed & referenced bits from pgste */ diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 450931a45b68..573bc29551ef 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -296,13 +296,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)))) /* Invalid psw mask. */ return -EINVAL; - if (addr == (addr_t) &dummy->regs.psw.addr) - /* - * The debugger changed the instruction address, - * reset system call restart, see signal.c:do_signal - */ - task_thread_info(child)->system_call = 0; - *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { @@ -614,11 +607,6 @@ static int __poke_user_compat(struct task_struct *child, /* Transfer 31 bit amode bit to psw mask. 
*/ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | (__u64)(tmp & PSW32_ADDR_AMODE); - /* - * The debugger changed the instruction address, - * reset system call restart, see signal.c:do_signal - */ - task_thread_info(child)->system_call = 0; } else { /* gpr 0-15 */ *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp; @@ -905,6 +893,14 @@ static int s390_last_break_get(struct task_struct *target, return 0; } +static int s390_last_break_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return 0; +} + #endif static int s390_system_call_get(struct task_struct *target, @@ -951,6 +947,7 @@ static const struct user_regset s390_regsets[] = { .size = sizeof(long), .align = sizeof(long), .get = s390_last_break_get, + .set = s390_last_break_set, }, #endif [REGSET_SYSTEM_CALL] = { @@ -1116,6 +1113,14 @@ static int s390_compat_last_break_get(struct task_struct *target, return 0; } +static int s390_compat_last_break_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return 0; +} + static const struct user_regset s390_compat_regsets[] = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, @@ -1139,6 +1144,7 @@ static const struct user_regset s390_compat_regsets[] = { .size = sizeof(long), .align = sizeof(long), .get = s390_compat_last_break_get, + .set = s390_compat_last_break_set, }, [REGSET_SYSTEM_CALL] = { .core_note_type = NT_S390_SYSTEM_CALL, diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index e58a462949b1..e54c4ff8abaa 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -579,7 +579,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size, *msg = "first memory chunk must be at least crashkernel size"; return 0; } - if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE)) + if (OLDMEM_BASE && crash_size == OLDMEM_SIZE) return OLDMEM_BASE; for (i = MEMORY_CHUNKS - 1; i >= 0; i--) { diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 05a85bc14c98..7f6f9f354545 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -460,9 +460,9 @@ void do_signal(struct pt_regs *regs) regs->svc_code >> 16); break; } - /* No longer in a system call */ - clear_thread_flag(TIF_SYSCALL); } + /* No longer in a system call */ + clear_thread_flag(TIF_SYSCALL); if ((is_compat_task() ? handle_signal32(signr, &ka, &info, oldset, regs) : @@ -486,6 +486,7 @@ void do_signal(struct pt_regs *regs) } /* No handlers present - check for system call restart */ + clear_thread_flag(TIF_SYSCALL); if (current_thread_info()->system_call) { regs->svc_code = current_thread_info()->system_call; switch (regs->gprs[2]) { @@ -500,9 +501,6 @@ void do_signal(struct pt_regs *regs) regs->gprs[2] = regs->orig_gpr2; set_thread_flag(TIF_SYSCALL); break; - default: - clear_thread_flag(TIF_SYSCALL); - break; } } diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h index 94e9a511de84..f80f8ceabc67 100644 --- a/arch/tile/include/asm/irq.h +++ b/arch/tile/include/asm/irq.h @@ -74,16 +74,6 @@ enum { */ void tile_irq_activate(unsigned int irq, int tile_irq_type); -/* - * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know - * how to use enable/disable_percpu_irq() to manage interrupts on each - * core. We can't use the generic enable/disable_irq() because they - * use a single reference count per irq, rather than per cpu per irq. 
- */ -void enable_percpu_irq(unsigned int irq); -void disable_percpu_irq(unsigned int irq); - - void setup_irq_regs(void); #endif /* _ASM_TILE_IRQ_H */ diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index aa0134db2dd6..02e628065012 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c @@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum) * Remove an irq from the disabled mask. If we're in an interrupt * context, defer enabling the HW interrupt until we leave. */ -void enable_percpu_irq(unsigned int irq) +static void tile_irq_chip_enable(struct irq_data *d) { - get_cpu_var(irq_disable_mask) &= ~(1UL << irq); + get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq); if (__get_cpu_var(irq_depth) == 0) - unmask_irqs(1UL << irq); + unmask_irqs(1UL << d->irq); put_cpu_var(irq_disable_mask); } -EXPORT_SYMBOL(enable_percpu_irq); /* * Add an irq to the disabled mask. We disable the HW interrupt @@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq); * in an interrupt context, the return path is careful to avoid * unmasking a newly disabled interrupt. */ -void disable_percpu_irq(unsigned int irq) +static void tile_irq_chip_disable(struct irq_data *d) { - get_cpu_var(irq_disable_mask) |= (1UL << irq); - mask_irqs(1UL << irq); + get_cpu_var(irq_disable_mask) |= (1UL << d->irq); + mask_irqs(1UL << d->irq); put_cpu_var(irq_disable_mask); } -EXPORT_SYMBOL(disable_percpu_irq); /* Mask an interrupt. */ static void tile_irq_chip_mask(struct irq_data *d) @@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d) static struct irq_chip tile_irq_chip = { .name = "tile_irq_chip", + .irq_enable = tile_irq_chip_enable, + .irq_disable = tile_irq_chip_disable, .irq_ack = tile_irq_chip_ack, .irq_eoi = tile_irq_chip_eoi, .irq_mask = tile_irq_chip_mask, diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index 658f2ce426a4..b3ed19f8779c 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c @@ -15,6 +15,7 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/vmalloc.h> +#include <linux/export.h> #include <asm/tlbflush.h> #include <asm/homecache.h> diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index 2a8014cb1ff5..9d610d3fb11e 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c @@ -24,6 +24,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/uaccess.h> +#include <linux/export.h> #include <asm/processor.h> #include <asm/sections.h> diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c index b671a86f4515..602908268093 100644 --- a/arch/tile/kernel/sysfs.c +++ b/arch/tile/kernel/sysfs.c @@ -18,6 +18,7 @@ #include <linux/cpu.h> #include <linux/slab.h> #include <linux/smp.h> +#include <linux/stat.h> #include <hv/hypervisor.h> /* Return a string queried from the hypervisor, truncated to page size. 
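
For context on the tile interrupt rework above, where the arch-private enable/disable_percpu_irq() helpers are folded into irq_chip callbacks: a minimal, hedged sketch of the resulting shape. The example_* names are illustrative, not from the patch; the point is only that the callbacks now receive a struct irq_data and take the irq number from it rather than being exported as arch-specific functions taking a bare unsigned int.

#include <linux/irq.h>

/* Hedged sketch only -- hypothetical driver, not part of this patch. */
static void example_irq_enable(struct irq_data *d)
{
	/* per-irq bookkeeping keyed on d->irq, then unmask in hardware */
	example_hw_unmask(d->irq);		/* hypothetical hw helper */
}

static void example_irq_disable(struct irq_data *d)
{
	example_hw_mask(d->irq);		/* hypothetical hw helper */
}

static struct irq_chip example_irq_chip = {
	.name		= "example_irq_chip",
	.irq_enable	= example_irq_enable,
	.irq_disable	= example_irq_disable,
	/* .irq_mask/.irq_unmask/.irq_ack/.irq_eoi as in tile_irq_chip above */
};
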
*/ diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index a87d2a859ba9..2a81d32de0da 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c @@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm); EXPORT_SYMBOL(current_text_addr); EXPORT_SYMBOL(dump_stack); +/* arch/tile/kernel/head.S */ +EXPORT_SYMBOL(empty_zero_page); + /* arch/tile/lib/, various memcpy files */ EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(__copy_to_user_inatomic); diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index cbe6f4f9eca3..1cc6ae477c98 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c @@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order) VM_BUG_ON(!virt_addr_valid((void *)addr)); page = virt_to_page((void *)addr); if (put_page_testzero(page)) { - int pages = (1 << order); homecache_change_page_home(page, order, initial_page_home()); - while (pages--) - __free_page(page++); + if (order == 0) { + free_hot_cold_page(page, 0); + } else { + init_page_count(page); + __free_pages(page, order); + } } } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cb9a1044a771..efb42949cc09 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -390,7 +390,7 @@ config X86_INTEL_CE This option compiles in support for the CE4100 SOC for settop boxes and media devices. -config X86_INTEL_MID +config X86_WANT_INTEL_MID bool "Intel MID platform support" depends on X86_32 depends on X86_EXTENDED_PLATFORM @@ -399,7 +399,10 @@ config X86_INTEL_MID systems which do not have the PCI legacy interfaces (Moorestown, Medfield). If you are building for a PC class system say N here. -if X86_INTEL_MID +if X86_WANT_INTEL_MID + +config X86_INTEL_MID + bool config X86_MRST bool "Moorestown MID platform" @@ -411,6 +414,7 @@ config X86_MRST select SPI select INTEL_SCU_IPC select X86_PLATFORM_DEVICES + select X86_INTEL_MID ---help--- Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin Internet Device(MID) platform. Moorestown consists of two chips: diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 908b96957d88..c9547033e38e 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -53,6 +53,13 @@ */ #define E820_RESERVED_KERN 128 +/* + * Address ranges that need to be mapped by the kernel direct + * mapping. This is used to make sure regions such as + * EFI_RUNTIME_SERVICES_DATA are directly mapped. See setup_arch(). 
+ */ +#define E820_RESERVED_EFI 129 + #ifndef __ASSEMBLY__ #include <linux/types.h> struct e820entry { @@ -115,6 +122,7 @@ static inline void early_memtest(unsigned long start, unsigned long end) } #endif +extern unsigned long e820_end_pfn(unsigned long limit_pfn, unsigned type); extern unsigned long e820_end_of_ram_pfn(void); extern unsigned long e820_end_of_low_ram_pfn(void); extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 7093e4a6a0bc..b8d8bfcd44a9 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -33,8 +33,6 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ efi_call_virt(f, a1, a2, a3, a4, a5, a6) -#define efi_ioremap(addr, size, type) ioremap_cache(addr, size) - #else /* !CONFIG_X86_32 */ extern u64 efi_call0(void *fp); @@ -84,9 +82,6 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) -extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, - u32 type); - #endif /* CONFIG_X86_32 */ extern int add_efi_memmap; diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h index 4420993acc47..925b605eb5c6 100644 --- a/arch/x86/include/asm/intel_scu_ipc.h +++ b/arch/x86/include/asm/intel_scu_ipc.h @@ -3,11 +3,15 @@ #include <linux/notifier.h> -#define IPCMSG_VRTC 0xFA /* Set vRTC device */ - -/* Command id associated with message IPCMSG_VRTC */ -#define IPC_CMD_VRTC_SETTIME 1 /* Set time */ -#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */ +#define IPCMSG_WARM_RESET 0xF0 +#define IPCMSG_COLD_RESET 0xF1 +#define IPCMSG_SOFT_RESET 0xF2 +#define IPCMSG_COLD_BOOT 0xF3 + +#define IPCMSG_VRTC 0xFA /* Set vRTC device */ + /* Command id associated with message IPCMSG_VRTC */ + #define IPC_CMD_VRTC_SETTIME 1 /* Set time */ + #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */ /* Read single register */ int intel_scu_ipc_ioread8(u16 addr, u8 *data); diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h index e6283129c821..93f79094c224 100644 --- a/arch/x86/include/asm/mrst.h +++ b/arch/x86/include/asm/mrst.h @@ -31,11 +31,20 @@ enum mrst_cpu_type { }; extern enum mrst_cpu_type __mrst_cpu_chip; + +#ifdef CONFIG_X86_INTEL_MID + static inline enum mrst_cpu_type mrst_identify_cpu(void) { return __mrst_cpu_chip; } +#else /* !CONFIG_X86_INTEL_MID */ + +#define mrst_identify_cpu() (0) + +#endif /* !CONFIG_X86_INTEL_MID */ + enum mrst_timer_options { MRST_TIMER_DEFAULT, MRST_TIMER_APBT_ONLY, diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 084ef95274cd..95203d40ffdd 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) return native_write_msr_safe(msr, low, high); } -/* rdmsr with exception handling */ +/* + * rdmsr with exception handling. + * + * Please note that the exception handling works only after we've + * switched to the "smart" #GP handler in trap_init() which knows about + * exception tables - using this macro earlier than that causes machine + * hangs on boxes which do not implement the @msr in the first argument. 
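
A usage sketch for the rdmsr_safe() note above (illustrative only, not part of the patch): after trap_init(), callers probe an optional MSR and treat a non-zero return as "MSR not implemented" instead of assuming the read succeeded -- the same reason the amd.c hunks further down move the MSR_AMD64_PATCH_LEVEL read from early_init_amd() to init_amd(). The example_ prefix marks a made-up helper name.

/* Hedged sketch: probe an MSR that may not exist on all CPUs. */
static void example_read_patch_level(struct cpuinfo_x86 *c)
{
	u32 rev, dummy;

	/* rdmsr_safe() returns 0 on success, non-zero if the read faults */
	if (rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &rev, &dummy) == 0)
		c->microcode = rev;
	/* on failure, leave c->microcode untouched */
}
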
+ */ #define rdmsr_safe(msr, p1, p2) \ ({ \ int __err; \ diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index c2ff2a1d845e..2d2f01ce6dcb 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -401,6 +401,7 @@ extern unsigned long arch_align_stack(unsigned long sp); extern void free_init_pages(char *what, unsigned long begin, unsigned long end); void default_idle(void); +bool set_pm_idle_to_default(void); void stop_this_cpu(void *dummy); diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index fa7b9176b76c..431793e5d484 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -32,6 +32,22 @@ extern int no_timer_check; * (mathieu.desnoyers@polymtl.ca) * * -johnstul@us.ibm.com "math is hard, lets go shopping!" + * + * In: + * + * ns = cycles * cyc2ns_scale / SC + * + * Although we may still have enough bits to store the value of ns, + * in some cases, we may not have enough bits to store cycles * cyc2ns_scale, + * leading to an incorrect result. + * + * To avoid this, we can decompose 'cycles' into quotient and remainder + * of division by SC. Then, + * + * ns = (quot * SC + rem) * cyc2ns_scale / SC + * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC + * + * - sqazi@google.com */ DECLARE_PER_CPU(unsigned long, cyc2ns); @@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); static inline unsigned long long __cycles_2_ns(unsigned long long cyc) { + unsigned long long quot; + unsigned long long rem; int cpu = smp_processor_id(); unsigned long long ns = per_cpu(cyc2ns_offset, cpu); - ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR; + quot = (cyc >> CYC2NS_SCALE_FACTOR); + rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1); + ns += quot * per_cpu(cyc2ns, cpu) + + ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR); return ns; } diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index 10474fb1185d..cf1d73643f60 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h @@ -57,6 +57,7 @@ #define UV1_HUB_PART_NUMBER 0x88a5 #define UV2_HUB_PART_NUMBER 0x8eb8 +#define UV2_HUB_PART_NUMBER_X 0x1111 /* Compat: if this #define is present, UV headers support UV2 */ #define UV2_HUB_IS_SUPPORTED 1 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 62ae3001ae02..9d59bbacd4e3 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -93,6 +93,8 @@ static int __init early_get_pnodeid(void) if (node_id.s.part_number == UV2_HUB_PART_NUMBER) uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; + if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X) + uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; uv_hub_info->hub_revision = uv_min_hub_revision_id; pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c7e46cb35327..0bab2b18bb20 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -442,8 +442,6 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c) static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) { - u32 dummy; - early_init_amd_mc(c); /* @@ -473,12 +471,12 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_EXTD_APICID); } #endif - - rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); } static void __cpuinit init_amd(struct cpuinfo_x86 *c) { + u32 dummy; + #ifdef CONFIG_SMP unsigned long long 
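
A worked illustration of the cyc2ns comment in the timer.h hunk above. This is a standalone user-space sketch with made-up values (CYC2NS_SHIFT, the cycle count and the scale are all illustrative); it only demonstrates why splitting 'cycles' into quotient and remainder of the shift keeps the intermediate product from wrapping around 64 bits while giving the same mathematical result.

#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SHIFT 10		/* illustrative; plays the role of SC = 2^shift */

/* naive form: cyc * scale can exceed 64 bits for large cycle counts */
static uint64_t cyc2ns_naive(uint64_t cyc, uint64_t scale)
{
	return (cyc * scale) >> CYC2NS_SHIFT;
}

/* split form: quot * scale + (rem * scale >> shift), no huge intermediate */
static uint64_t cyc2ns_split(uint64_t cyc, uint64_t scale)
{
	uint64_t quot = cyc >> CYC2NS_SHIFT;
	uint64_t rem  = cyc & ((1ULL << CYC2NS_SHIFT) - 1);

	return quot * scale + ((rem * scale) >> CYC2NS_SHIFT);
}

int main(void)
{
	uint64_t cyc   = 1ULL << 62;	/* made-up, very large cycle count */
	uint64_t scale = 426;		/* made-up cyc2ns scale factor */

	/* the naive product wraps around 2^64; the split form does not */
	printf("naive: %llu\n", (unsigned long long)cyc2ns_naive(cyc, scale));
	printf("split: %llu\n", (unsigned long long)cyc2ns_split(cyc, scale));
	return 0;
}
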
value; @@ -657,6 +655,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask); } } + + rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index a71efcdbb092..97b26356e9ee 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -547,6 +547,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, if (tmp != mask_lo) { printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); + add_taint(TAINT_FIRMWARE_WORKAROUND); mask_lo = tmp; } } @@ -693,6 +694,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock) /* Disable MTRRs, and set the default type to uncached */ mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); + wbinvd(); } static void post_set(void) __releases(set_atomicity_lock) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 640891014b2a..2bda212a0010 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -312,12 +312,8 @@ int x86_setup_perfctr(struct perf_event *event) return -EOPNOTSUPP; } - /* - * Do not allow config1 (extended registers) to propagate, - * there's no sane user-space generalization yet: - */ if (attr->type == PERF_TYPE_RAW) - return 0; + return x86_pmu_extra_regs(event->attr.config, event); if (attr->type == PERF_TYPE_HW_CACHE) return set_ext_hw_attr(hwc, event); @@ -588,7 +584,7 @@ done: x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]); } } - return num ? -ENOSPC : 0; + return num ? -EINVAL : 0; } /* @@ -607,7 +603,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, if (is_x86_event(leader)) { if (n >= max_count) - return -ENOSPC; + return -EINVAL; cpuc->event_list[n] = leader; n++; } @@ -620,7 +616,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, continue; if (n >= max_count) - return -ENOSPC; + return -EINVAL; cpuc->event_list[n] = event; n++; @@ -1316,7 +1312,7 @@ static int validate_event(struct perf_event *event) c = x86_pmu.get_event_constraints(fake_cpuc, event); if (!c || !c->weight) - ret = -ENOSPC; + ret = -EINVAL; if (x86_pmu.put_event_constraints) x86_pmu.put_event_constraints(fake_cpuc, event); @@ -1341,7 +1337,7 @@ static int validate_group(struct perf_event *event) { struct perf_event *leader = event->group_leader; struct cpu_hw_events *fake_cpuc; - int ret = -ENOSPC, n; + int ret = -EINVAL, n; fake_cpuc = allocate_fake_cpuc(); if (IS_ERR(fake_cpuc)) diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index ab6343d21825..3b8a2d30d14e 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -199,8 +199,7 @@ static int force_ibs_eilvt_setup(void) goto out; } - pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset); - pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); + pr_info("IBS: LVT offset %d assigned\n", offset); return 0; out: @@ -265,19 +264,23 @@ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *h static __init int amd_ibs_init(void) { u32 caps; - int ret; + int ret = -EINVAL; caps = __get_ibs_caps(); if (!caps) return -ENODEV; /* ibs not supported by the cpu */ - if (!ibs_eilvt_valid()) { - ret = force_ibs_eilvt_setup(); - if (ret) { - pr_err("Failed to setup IBS, %d\n", ret); - return ret; - } - } + /* + * 
Force LVT offset assignment for family 10h: The offsets are + * not assigned by the BIOS for this family, so the OS is + * responsible for doing it. If the OS assignment fails, fall + * back to BIOS settings and try to setup this. + */ + if (boot_cpu_data.x86 == 0x10) + force_ibs_eilvt_setup(); + + if (!ibs_eilvt_valid()) + goto out; get_online_cpus(); ibs_caps = caps; @@ -287,7 +290,11 @@ static __init int amd_ibs_init(void) smp_call_function(setup_APIC_ibs, NULL, 1); put_online_cpus(); - return perf_event_ibs_init(); + ret = perf_event_ibs_init(); +out: + if (ret) + pr_err("Failed to setup IBS, %d\n", ret); + return ret; } /* Since we need the pci subsystem to init ibs we can't do this earlier: */ diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 2be5ebe99872..8d601b18bf9f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1545,6 +1545,13 @@ static void intel_clovertown_quirks(void) x86_pmu.pebs_constraints = NULL; } +static void intel_sandybridge_quirks(void) +{ + printk(KERN_WARNING "PEBS disabled due to CPU errata.\n"); + x86_pmu.pebs = 0; + x86_pmu.pebs_constraints = NULL; +} + __init int intel_pmu_init(void) { union cpuid10_edx edx; @@ -1694,6 +1701,7 @@ __init int intel_pmu_init(void) break; case 42: /* SandyBridge */ + x86_pmu.quirks = intel_sandybridge_quirks; case 45: /* SandyBridge, "Romely-EP" */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index c0d238f49db8..73da6b64f5b7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -493,6 +493,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) unsigned long from = cpuc->lbr_entries[0].from; unsigned long old_to, to = cpuc->lbr_entries[0].to; unsigned long ip = regs->ip; + int is_64bit = 0; /* * We don't need to fixup if the PEBS assist is fault like @@ -544,7 +545,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) } else kaddr = (void *)to; - kernel_insn_init(&insn, kaddr); +#ifdef CONFIG_X86_64 + is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32); +#endif + insn_init(&insn, kaddr, is_64bit); insn_get_length(&insn); to += insn.length; } while (to < ip); diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 492bf1358a7c..ef484d9d0a25 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -1268,7 +1268,7 @@ reserve: } done: - return num ? -ENOSPC : 0; + return num ? 
-EINVAL : 0; } static __initconst const struct x86_pmu p4_pmu = { diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 303a0e48f076..65ffd110a81b 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -135,6 +135,7 @@ static void __init e820_print_type(u32 type) printk(KERN_CONT "(usable)"); break; case E820_RESERVED: + case E820_RESERVED_EFI: printk(KERN_CONT "(reserved)"); break; case E820_ACPI: @@ -783,7 +784,7 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) /* * Find the highest page frame number we have available */ -static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type) +unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type) { int i; unsigned long last_pfn = 0; diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index b946a9eac7d9..1bb0bf4d92cd 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void) } EXPORT_SYMBOL_GPL(hpet_rtc_timer_init); +static void hpet_disable_rtc_channel(void) +{ + unsigned long cfg; + cfg = hpet_readl(HPET_T1_CFG); + cfg &= ~HPET_TN_ENABLE; + hpet_writel(cfg, HPET_T1_CFG); +} + /* * The functions below are called from rtc driver. * Return 0 if HPET is not being used. @@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask) return 0; hpet_rtc_flags &= ~bit_mask; + if (unlikely(!hpet_rtc_flags)) + hpet_disable_rtc_channel(); + return 1; } EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit); @@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq); static void hpet_rtc_timer_reinit(void) { - unsigned int cfg, delta; + unsigned int delta; int lost_ints = -1; - if (unlikely(!hpet_rtc_flags)) { - cfg = hpet_readl(HPET_T1_CFG); - cfg &= ~HPET_TN_ENABLE; - hpet_writel(cfg, HPET_T1_CFG); - return; - } + if (unlikely(!hpet_rtc_flags)) + hpet_disable_rtc_channel(); if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) delta = hpet_default_delta; diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index acf8fbf8fbda..69bca468c47a 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -38,6 +38,9 @@ static inline void stack_overflow_check(struct pt_regs *regs) #ifdef CONFIG_DEBUG_STACKOVERFLOW u64 curbase = (u64)task_stack_page(current); + if (user_mode_vm(regs)) + return; + WARN_ONCE(regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE && regs->sp < curbase + sizeof(struct thread_info) + diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index f2d2a664e797..9d46f5e43b51 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -256,7 +256,7 @@ static int __init microcode_dev_init(void) return 0; } -static void microcode_dev_exit(void) +static void __exit microcode_dev_exit(void) { misc_deregister(µcode_dev); } @@ -519,10 +519,8 @@ static int __init microcode_init(void) microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0); - if (IS_ERR(microcode_pdev)) { - microcode_dev_exit(); + if (IS_ERR(microcode_pdev)) return PTR_ERR(microcode_pdev); - } get_online_cpus(); mutex_lock(µcode_mutex); @@ -532,14 +530,12 @@ static int __init microcode_init(void) mutex_unlock(µcode_mutex); put_online_cpus(); - if (error) { - platform_device_unregister(microcode_pdev); - return error; - } + if (error) + goto out_pdev; error = microcode_dev_init(); if (error) - return error; + goto out_sysdev_driver; register_syscore_ops(&mc_syscore_ops); register_hotcpu_notifier(&mc_cpu_notifier); @@ -548,6 +544,20 @@ static 
int __init microcode_init(void) " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n"); return 0; + +out_sysdev_driver: + get_online_cpus(); + mutex_lock(µcode_mutex); + + sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver); + + mutex_unlock(µcode_mutex); + put_online_cpus(); + +out_pdev: + platform_device_unregister(microcode_pdev); + return error; + } module_init(microcode_init); diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 9103b89c145a..0741b062a304 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m) } #endif + set_bit(m->busid, mp_bus_not_pci); if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { - set_bit(m->busid, mp_bus_not_pci); #if defined(CONFIG_EISA) || defined(CONFIG_MCA) mp_bus_id_to_type[m->busid] = MP_BUS_ISA; #endif diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index b9b3b1a51643..ee5d4fbd53b4 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -403,6 +403,14 @@ void default_idle(void) EXPORT_SYMBOL(default_idle); #endif +bool set_pm_idle_to_default(void) +{ + bool ret = !!pm_idle; + + pm_idle = default_idle; + + return ret; +} void stop_this_cpu(void *dummy) { local_irq_disable(); diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index b78643d0f9a5..03920a15a632 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -553,4 +553,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC, quirk_amd_nb_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK, quirk_amd_nb_node); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0, + quirk_amd_nb_node); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1, + quirk_amd_nb_node); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2, + quirk_amd_nb_node); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3, + quirk_amd_nb_node); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4, + quirk_amd_nb_node); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5, + quirk_amd_nb_node); + #endif diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e334be1182b9..37a458b521a6 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup); */ /* - * Some machines require the "reboot=b" commandline option, + * Some machines require the "reboot=b" or "reboot=k" commandline options, * this quirk makes that automatic. */ static int __init set_bios_reboot(const struct dmi_system_id *d) @@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d) return 0; } +static int __init set_kbd_reboot(const struct dmi_system_id *d) +{ + if (reboot_type != BOOT_KBD) { + reboot_type = BOOT_KBD; + printk(KERN_INFO "%s series board detected. 
Selecting KBD-method for reboot.\n", d->ident); + } + return 0; +} + static struct dmi_system_id __initdata reboot_dmi_table[] = { { /* Handle problems with rebooting on Dell E520's */ .callback = set_bios_reboot, @@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { }, }, { /* Handle reboot issue on Acer Aspire one */ - .callback = set_bios_reboot, + .callback = set_kbd_reboot, .ident = "Acer Aspire One A110", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -443,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), }, }, + { /* Handle problems with rebooting on the OptiPlex 990. */ + .callback = set_pci_reboot, + .ident = "Dell OptiPlex 990", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), + }, + }, { } }; diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 348ce016a835..af6db6ec5b2a 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -12,6 +12,7 @@ #include <asm/vsyscall.h> #include <asm/x86_init.h> #include <asm/time.h> +#include <asm/mrst.h> #ifdef CONFIG_X86_32 /* @@ -242,6 +243,10 @@ static __init int add_rtc_cmos(void) if (of_have_populated_dt()) return 0; + /* Intel MID platforms don't have ioport rtc */ + if (mrst_identify_cpu()) + return -ENODEV; + platform_device_register(&rtc_device); dev_info(&rtc_device.dev, "registered platform RTC device (no PNP device found)\n"); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index cf0ef986cb6d..9a9e40fb091c 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -691,6 +691,8 @@ early_param("reservelow", parse_reservelow); void __init setup_arch(char **cmdline_p) { + unsigned long end_pfn; + #ifdef CONFIG_X86_32 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); visws_early_detect(); @@ -932,7 +934,24 @@ void __init setup_arch(char **cmdline_p) init_gbpages(); /* max_pfn_mapped is updated here */ - max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT); + end_pfn = max_low_pfn; + +#ifdef CONFIG_X86_64 + /* + * There may be regions after the last E820_RAM region that we + * want to include in the kernel direct mapping, such as + * EFI_RUNTIME_SERVICES_DATA. 
+ */ + if (efi_enabled) { + unsigned long efi_end; + + efi_end = e820_end_pfn(MAXMEM>>PAGE_SHIFT, E820_RESERVED_EFI); + if (efi_end > max_low_pfn) + end_pfn = efi_end; + } +#endif + + max_low_pfn_mapped = init_memory_mapping(0, end_pfn << PAGE_SHIFT); max_pfn_mapped = max_low_pfn_mapped; #ifdef CONFIG_X86_64 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index ea305856151c..dd74e46828c0 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr, do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; + if (PageTail(page)) + get_huge_page_tail(page); (*nr)++; page++; refs++; diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index b49962662101..f4f29b19fac5 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); set_pte(kmap_pte-idx, mk_pte(page, prot)); + arch_flush_lazy_mmu_mode(); return (void *)vaddr; } @@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr) */ kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); + arch_flush_lazy_mmu_mode(); } #ifdef CONFIG_DEBUG_HIGHMEM else { diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c index cdfe4c54deca..f148cf652678 100644 --- a/arch/x86/oprofile/init.c +++ b/arch/x86/oprofile/init.c @@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops); extern void op_nmi_exit(void); extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); +static int nmi_timer; int __init oprofile_arch_init(struct oprofile_operations *ops) { @@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) #ifdef CONFIG_X86_LOCAL_APIC ret = op_nmi_init(ops); #endif + nmi_timer = (ret != 0); #ifdef CONFIG_X86_IO_APIC - if (ret < 0) + if (nmi_timer) ret = op_nmi_timer_init(ops); #endif ops->backtrace = x86_backtrace; @@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) void oprofile_arch_exit(void) { #ifdef CONFIG_X86_LOCAL_APIC - op_nmi_exit(); + if (!nmi_timer) + op_nmi_exit(); #endif } diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 37718f0f053d..c9718a16be15 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -323,10 +323,13 @@ static void __init do_add_efi_memmap(void) case EFI_UNUSABLE_MEMORY: e820_type = E820_UNUSABLE; break; + case EFI_RUNTIME_SERVICES_DATA: + e820_type = E820_RESERVED_EFI; + break; default: /* * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE - * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO + * EFI_MEMORY_MAPPED_IO * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE */ e820_type = E820_RESERVED; @@ -671,10 +674,21 @@ void __init efi_enter_virtual_mode(void) end_pfn = PFN_UP(end); if (end_pfn <= max_low_pfn_mapped || (end_pfn > (1UL << (32 - PAGE_SHIFT)) - && end_pfn <= max_pfn_mapped)) + && end_pfn <= max_pfn_mapped)) { va = __va(md->phys_addr); - else - va = efi_ioremap(md->phys_addr, size, md->type); + + if (!(md->attribute & EFI_MEMORY_WB)) { + addr = (u64) (unsigned long)va; + npages = md->num_pages; + memrange_efi_to_native(&addr, &npages); + set_memory_uc(addr, npages); + } + } else { + if (!(md->attribute & EFI_MEMORY_WB)) + va = ioremap_nocache(md->phys_addr, size); + else + va = ioremap_cache(md->phys_addr, size); + } md->virt_addr = (u64) (unsigned long) va; @@ -684,13 +698,6 @@ void __init 
efi_enter_virtual_mode(void) continue; } - if (!(md->attribute & EFI_MEMORY_WB)) { - addr = md->virt_addr; - npages = md->num_pages; - memrange_efi_to_native(&addr, &npages); - set_memory_uc(addr, npages); - } - systab = (u64) (unsigned long) efi_phys.systab; if (md->phys_addr <= systab && systab < end) { systab += md->virt_addr - md->phys_addr; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index ac3aa54e2654..312250c6b2de 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -80,20 +80,3 @@ void __init efi_call_phys_epilog(void) local_irq_restore(efi_flags); early_code_mapping_set_exec(0); } - -void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, - u32 type) -{ - unsigned long last_map_pfn; - - if (type == EFI_MEMORY_MAPPED_IO) - return ioremap(phys_addr, size); - - last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); - if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { - unsigned long top = last_map_pfn << PAGE_SHIFT; - efi_ioremap(top, size - (top - phys_addr), type); - } - - return (void __iomem *)__va(phys_addr); -} diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index b1489a06a49d..ad4ec1cb097e 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -76,6 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX]; EXPORT_SYMBOL_GPL(sfi_mrtc_array); int sfi_mrtc_num; +static void mrst_power_off(void) +{ + if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT) + intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1); +} + +static void mrst_reboot(void) +{ + if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT) + intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0); + else + intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); +} + /* parse all the mtimer info to a static mtimer array */ static int __init sfi_parse_mtmr(struct sfi_table_header *table) { @@ -265,17 +279,6 @@ static int mrst_i8042_detect(void) return 0; } -/* Reboot and power off are handled by the SCU on a MID device */ -static void mrst_power_off(void) -{ - intel_scu_ipc_simple_command(0xf1, 1); -} - -static void mrst_reboot(void) -{ - intel_scu_ipc_simple_command(0xf1, 0); -} - /* * Moorestown does not have external NMI source nor port 0x61 to report * NMI status. 
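
For readers following the efi_enter_virtual_mode() change above: a stripped-down sketch (illustrative, not the kernel code) of the mapping decision it now makes for regions outside the direct mapping -- the cache attribute follows the descriptor's EFI_MEMORY_WB bit instead of going through the removed efi_ioremap() helper. example_map_efi_region() is a made-up name.

/* Hedged sketch: choose a mapping for one EFI memory descriptor. */
static void __iomem *example_map_efi_region(efi_memory_desc_t *md,
					    unsigned long size)
{
	if (md->attribute & EFI_MEMORY_WB)
		return ioremap_cache(md->phys_addr, size);	/* cacheable */

	return ioremap_nocache(md->phys_addr, size);		/* uncached */
}
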
The possible NMI sources are from pmu as a result of NMI @@ -484,6 +487,46 @@ static void __init *max7315_platform_data(void *info) return max7315; } +static void *tca6416_platform_data(void *info) +{ + static struct pca953x_platform_data tca6416; + struct i2c_board_info *i2c_info = info; + int gpio_base, intr; + char base_pin_name[SFI_NAME_LEN + 1]; + char intr_pin_name[SFI_NAME_LEN + 1]; + + strcpy(i2c_info->type, "tca6416"); + strcpy(base_pin_name, "tca6416_base"); + strcpy(intr_pin_name, "tca6416_int"); + + gpio_base = get_gpio_by_name(base_pin_name); + intr = get_gpio_by_name(intr_pin_name); + + if (gpio_base == -1) + return NULL; + tca6416.gpio_base = gpio_base; + if (intr != -1) { + i2c_info->irq = intr + MRST_IRQ_OFFSET; + tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET; + } else { + i2c_info->irq = -1; + tca6416.irq_base = -1; + } + return &tca6416; +} + +static void *mpu3050_platform_data(void *info) +{ + struct i2c_board_info *i2c_info = info; + int intr = get_gpio_by_name("mpu3050_int"); + + if (intr == -1) + return NULL; + + i2c_info->irq = intr + MRST_IRQ_OFFSET; + return NULL; +} + static void __init *emc1403_platform_data(void *info) { static short intr2nd_pdata; @@ -646,12 +689,15 @@ static void *msic_ocd_platform_data(void *info) static const struct devs_id __initconst device_ids[] = { {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data}, {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data}, + {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data}, {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data}, {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data}, {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data}, + {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data}, {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data}, {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data}, {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data}, + {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data}, /* MSIC subdevices */ {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data}, diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 38d0af4fefec..1093f80c162d 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -410,6 +410,6 @@ void __init xen_arch_setup(void) #endif disable_cpuidle(); boot_option_idle_override = IDLE_HALT; - + WARN_ON(set_pm_idle_to_default()); fiddle_vdso(); } diff --git a/drivers/base/core.c b/drivers/base/core.c index d8b3d89db043..919daa7cd5b1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1743,8 +1743,10 @@ void device_shutdown(void) */ list_del_init(&dev->kobj.entry); spin_unlock(&devices_kset->list_lock); - /* Disable all device's runtime power management */ - pm_runtime_disable(dev); + + /* Don't allow any more runtime suspends */ + pm_runtime_get_noresume(dev); + pm_runtime_barrier(dev); if (dev->bus && dev->bus->shutdown) { dev_dbg(dev, "shutdown\n"); diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c index f10fc521951b..1eedb6f7fdab 100644 --- a/drivers/firmware/sigma.c +++ b/drivers/firmware/sigma.c @@ -14,13 +14,34 @@ #include <linux/module.h> #include <linux/sigma.h> -/* Return: 0==OK, <0==error, =1 ==no more actions */ +static size_t sigma_action_size(struct sigma_action *sa) +{ + size_t payload = 0; + + switch (sa->instr) { + case SIGMA_ACTION_WRITEXBYTES: + case SIGMA_ACTION_WRITESINGLE: + case SIGMA_ACTION_WRITESAFELOAD: + payload = sigma_action_len(sa); + break; + default: + break; + } + + payload = ALIGN(payload, 2); + + return payload + 
sizeof(struct sigma_action); +} + +/* + * Returns a negative error value in case of an error, 0 if processing of + * the firmware should be stopped after this action, 1 otherwise. + */ static int -process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw) +process_sigma_action(struct i2c_client *client, struct sigma_action *sa) { - struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos); size_t len = sigma_action_len(sa); - int ret = 0; + int ret; pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__, sa->instr, sa->addr, len); @@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw) case SIGMA_ACTION_WRITEXBYTES: case SIGMA_ACTION_WRITESINGLE: case SIGMA_ACTION_WRITESAFELOAD: - if (ssfw->fw->size < ssfw->pos + len) - return -EINVAL; ret = i2c_master_send(client, (void *)&sa->addr, len); if (ret < 0) return -EINVAL; break; - case SIGMA_ACTION_DELAY: - ret = 0; udelay(len); len = 0; break; - case SIGMA_ACTION_END: - return 1; - + return 0; default: return -EINVAL; } - /* when arrive here ret=0 or sent data */ - ssfw->pos += sigma_action_size(sa, len); - return ssfw->pos == ssfw->fw->size; + return 1; } static int process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw) { - pr_debug("%s: processing %p\n", __func__, ssfw); + struct sigma_action *sa; + size_t size; + int ret; + + while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) { + sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos); + + size = sigma_action_size(sa); + ssfw->pos += size; + if (ssfw->pos > ssfw->fw->size || size == 0) + break; + + ret = process_sigma_action(client, sa); - while (1) { - int ret = process_sigma_action(client, ssfw); pr_debug("%s: action returned %i\n", __func__, ret); - if (ret == 1) - return 0; - else if (ret) + + if (ret <= 0) return ret; } + + if (ssfw->pos != ssfw->fw->size) + return -EINVAL; + + return 0; } int process_sigma_firmware(struct i2c_client *client, const char *name) @@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name) /* then verify the header */ ret = -EINVAL; - if (fw->size < sizeof(*ssfw_head)) + + /* + * Reject too small or unreasonable large files. The upper limit has been + * chosen a bit arbitrarily, but it should be enough for all practical + * purposes and having the limit makes it easier to avoid integer + * overflows later in the loading process. 
+ */ + if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000) goto done; ssfw_head = (void *)fw->data; if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic))) goto done; - crc = crc32(0, fw->data, fw->size); + crc = crc32(0, fw->data + sizeof(*ssfw_head), + fw->size - sizeof(*ssfw_head)); pr_debug("%s: crc=%x\n", __func__, crc); - if (crc != ssfw_head->crc) + if (crc != le32_to_cpu(ssfw_head->crc)) goto done; ssfw.pos = sizeof(*ssfw_head); diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 5b2b9e26f49c..8ef9e9abe970 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o -obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o +obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 3969f7553fe7..d2619d72cece 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -456,6 +456,30 @@ done: EXPORT_SYMBOL(drm_crtc_helper_set_mode); +static int +drm_crtc_helper_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_connector *connector; + struct drm_encoder *encoder; + + /* Decouple all encoders and their attached connectors from this crtc */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc != crtc) + continue; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder != encoder) + continue; + + connector->encoder = NULL; + } + } + + drm_helper_disable_unused_functions(dev); + return 0; +} + /** * drm_crtc_helper_set_config - set a new config from userspace * @crtc: CRTC to setup @@ -510,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) (int)set->num_connectors, set->x, set->y); } else { DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); - set->mode = NULL; - set->num_connectors = 0; + return drm_crtc_helper_disable(set->crtc); } dev = set->crtc->dev; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8359dc777041..60ff1b63b568 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2026,8 +2026,13 @@ i915_wait_request(struct intel_ring_buffer *ring, * to handle this, the waiter on a request often wants an associated * buffer to have made it to the inactive list, and we would need * a separate wait queue to handle that. + * + * To avoid a recursion with the ilk VT-d workaround (that calls + * gpu_idle when unbinding objects with interruptible==false) don't + * retire requests in that case (because it might call unbind if the + * active list holds the last reference to the object). 
*/ - if (ret == 0) + if (ret == 0 && dev_priv->mm.interruptible) i915_gem_retire_requests_ring(ring); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index ddbabefb4273..b12fd2c80812 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, spin_unlock_irqrestore(&dev->event_lock, flags); return 0; } + +int +nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct nouveau_bo *bo; + int ret; + + args->pitch = roundup(args->width * (args->bpp / 8), 256); + args->size = args->pitch * args->height; + args->size = roundup(args->size, PAGE_SIZE); + + ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo); + if (ret) + return ret; + + ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle); + drm_gem_object_unreference_unlocked(bo->gem); + return ret; +} + +int +nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file_priv, handle); +} + +int +nouveau_display_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle, uint64_t *poffset) +{ + struct drm_gem_object *gem; + + gem = drm_gem_object_lookup(dev, file_priv, handle); + if (gem) { + struct nouveau_bo *bo = gem->driver_private; + *poffset = bo->bo.addr_space_offset; + drm_gem_object_unreference_unlocked(gem); + return 0; + } + + return -ENOENT; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 9f7bb1295262..9791d13c9e3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -433,6 +433,10 @@ static struct drm_driver driver = { .gem_open_object = nouveau_gem_object_open, .gem_close_object = nouveau_gem_object_close, + .dumb_create = nouveau_display_dumb_create, + .dumb_map_offset = nouveau_display_dumb_map_offset, + .dumb_destroy = nouveau_display_dumb_destroy, + .name = DRIVER_NAME, .desc = DRIVER_DESC, #ifdef GIT_REVISION diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 29837da1098b..4c0be3a4ed88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event); int nouveau_finish_page_flip(struct nouveau_channel *, struct nouveau_page_flip_state *); +int nouveau_display_dumb_create(struct drm_file *, struct drm_device *, + struct drm_mode_create_dumb *args); +int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *, + uint32_t handle, uint64_t *offset); +int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *, + uint32_t handle); /* nv10_gpio.c */ int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 02222c540aee..960c0ae0c0c3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) return ret; } - ret = drm_mm_init(&chan->ramin_heap, base, size); + ret = drm_mm_init(&chan->ramin_heap, base, size - base); if (ret) { NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); 
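
A quick worked example for the pitch/size computation in nouveau_display_dumb_create() above, using made-up framebuffer dimensions: a 1000x768 buffer at 32 bpp gives pitch = roundup(1000 * 4, 256) = 4096 bytes and size = 4096 * 768 = 3145728 bytes, which is already a multiple of a 4096-byte PAGE_SIZE, so the final roundup leaves it unchanged.

/* Hedged sketch with made-up values, mirroring the dumb_create math above. */
static void example_dumb_size(void)
{
	u32 width = 1000, height = 768, bpp = 32;		/* illustrative */
	u32 pitch = roundup(width * (bpp / 8), 256);		/* 4000 -> 4096 */
	u64 size  = roundup((u64)pitch * height, PAGE_SIZE);	/* 3145728      */
	(void)size;
}
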
nouveau_gpuobj_ref(NULL, &chan->ramin); diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index b75258a9fe44..c8a463b76c89 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be) pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); } + nvbe->unmap_pages = false; } + + nvbe->pages = NULL; } static void diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index d23ca00e7d62..06de250fe617 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_display *disp = nv50_display(dev); u32 unk30 = nv_rd32(dev, 0x610030), mc; - int i, crtc, or, type = OUTPUT_ANY; + int i, crtc, or = 0, type = OUTPUT_ANY; NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); disp->irq.dcb = NULL; @@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev) struct nv50_display *disp = nv50_display(dev); u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0; struct dcb_entry *dcb; - int i, crtc, or, type = OUTPUT_ANY; + int i, crtc, or = 0, type = OUTPUT_ANY; NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); dcb = disp->irq.dcb; diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index a74e501afd25..ecfafd70cf0e 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) u8 tpnr[GPC_MAX]; int i, gpc, tpc; + nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */ + /* * TP ROP UNKVAL(magic_not_rop_nr) * 450: 4/0/0/0 2 3 diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c index 23d63b4b3d77..cb006a718e70 100644 --- a/drivers/gpu/drm/nouveau/nvd0_display.c +++ b/drivers/gpu/drm/nouveau/nvd0_display.c @@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode) continue; if (nv_partner != nv_encoder && - nv_partner->dcb->or == nv_encoder->or) { + nv_partner->dcb->or == nv_encoder->dcb->or) { if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) return; break; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 87631fede1f8..2b97262e3ab1 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, return -EINVAL; } - if (tiling_flags & RADEON_TILING_MACRO) + if (tiling_flags & RADEON_TILING_MACRO) { + if (rdev->family >= CHIP_CAYMAN) + tmp = rdev->config.cayman.tile_config; + else + tmp = rdev->config.evergreen.tile_config; + + switch ((tmp & 0xf0) >> 4) { + case 0: /* 4 banks */ + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); + break; + case 1: /* 8 banks */ + default: + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); + break; + case 2: /* 16 banks */ + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); + break; + } + + switch ((tmp & 0xf000) >> 12) { + case 0: /* 1KB rows */ + default: + fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB); + break; + case 1: /* 2KB rows */ + fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB); + break; + case 2: /* 4KB rows */ 
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB); + break; + } + fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); - else if (tiling_flags & RADEON_TILING_MICRO) + } else if (tiling_flags & RADEON_TILING_MICRO) fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); switch (radeon_crtc->crtc_id) { diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 1d603a3335db..5e00d1670aa9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); + int i; /* Lock the graphics update lock */ tmp |= EVERGREEN_GRPH_UPDATE_LOCK; @@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) (u32)crtc_base); /* Wait for update_pending to go high. */ - while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 38e1bda73d33..cd4590aae154 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -38,6 +38,7 @@ struct evergreen_cs_track { u32 group_size; u32 nbanks; u32 npipes; + u32 row_size; /* value we track */ u32 nsamples; u32 cb_color_base_last[12]; @@ -77,6 +78,44 @@ struct evergreen_cs_track { struct radeon_bo *db_s_write_bo; }; +static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) +{ + if (tiling_flags & RADEON_TILING_MACRO) + return ARRAY_2D_TILED_THIN1; + else if (tiling_flags & RADEON_TILING_MICRO) + return ARRAY_1D_TILED_THIN1; + else + return ARRAY_LINEAR_GENERAL; +} + +static u32 evergreen_cs_get_num_banks(u32 nbanks) +{ + switch (nbanks) { + case 2: + return ADDR_SURF_2_BANK; + case 4: + return ADDR_SURF_4_BANK; + case 8: + default: + return ADDR_SURF_8_BANK; + case 16: + return ADDR_SURF_16_BANK; + } +} + +static u32 evergreen_cs_get_tile_split(u32 row_size) +{ + switch (row_size) { + case 1: + default: + return ADDR_SURF_TILE_SPLIT_1KB; + case 2: + return ADDR_SURF_TILE_SPLIT_2KB; + case 4: + return ADDR_SURF_TILE_SPLIT_4KB; + } +} + static void evergreen_cs_track_init(struct evergreen_cs_track *track) { int i; @@ -490,12 +529,11 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } ib[idx] &= ~Z_ARRAY_MODE(0xf); track->db_z_info &= ~Z_ARRAY_MODE(0xf); + ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { - ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - } else { - ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); + ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); } } break; @@ -618,13 +656,8 
@@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - } + ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); } break; case CB_COLOR8_INFO: @@ -640,13 +673,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - } + ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); } break; case CB_COLOR0_PITCH: @@ -701,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR9_ATTRIB: case CB_COLOR10_ATTRIB: case CB_COLOR11_ATTRIB: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); + ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); + } break; case CB_COLOR0_DIM: case CB_COLOR1_DIM: @@ -1318,10 +1356,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); if (!p->keep_tiling_flags) { - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + ib[idx+1+(i*8)+1] |= + TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx+1+(i*8)+6] |= + TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); + ib[idx+1+(i*8)+7] |= + TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); + } } texture = reloc->robj; /* tex mip base */ @@ -1422,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; struct evergreen_cs_track *track; + u32 tmp; int r; if (p->track == NULL) { @@ -1430,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) if (track == NULL) return -ENOMEM; evergreen_cs_track_init(track); - track->npipes = p->rdev->config.evergreen.tiling_npipes; - track->nbanks = p->rdev->config.evergreen.tiling_nbanks; - track->group_size = p->rdev->config.evergreen.tiling_group_size; + if (p->rdev->family >= CHIP_CAYMAN) + tmp = p->rdev->config.cayman.tile_config; + else + tmp = p->rdev->config.evergreen.tile_config; + + switch (tmp & 0xf) { + case 0: + track->npipes = 1; + break; + case 1: + default: + track->npipes = 2; + break; + case 2: + track->npipes = 4; + 
break; + case 3: + track->npipes = 8; + break; + } + + switch ((tmp & 0xf0) >> 4) { + case 0: + track->nbanks = 4; + break; + case 1: + default: + track->nbanks = 8; + break; + case 2: + track->nbanks = 16; + break; + } + + switch ((tmp & 0xf00) >> 8) { + case 0: + track->group_size = 256; + break; + case 1: + default: + track->group_size = 512; + break; + } + + switch ((tmp & 0xf000) >> 12) { + case 0: + track->row_size = 1; + break; + case 1: + default: + track->row_size = 2; + break; + case 2: + track->row_size = 4; + break; + } + p->track = track; } do { diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index c781c92c3451..7d7f2155e34c 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -42,6 +42,17 @@ # define EVERGREEN_GRPH_DEPTH_8BPP 0 # define EVERGREEN_GRPH_DEPTH_16BPP 1 # define EVERGREEN_GRPH_DEPTH_32BPP 2 +# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2) +# define EVERGREEN_ADDR_SURF_2_BANK 0 +# define EVERGREEN_ADDR_SURF_4_BANK 1 +# define EVERGREEN_ADDR_SURF_8_BANK 2 +# define EVERGREEN_ADDR_SURF_16_BANK 3 +# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4) +# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6) +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0 +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1 +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2 +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3 # define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) /* 8 BPP */ # define EVERGREEN_GRPH_FORMAT_INDEXED 0 @@ -61,6 +72,24 @@ # define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 # define EVERGREEN_GRPH_FORMAT_RGB111110 6 # define EVERGREEN_GRPH_FORMAT_BGR101111 7 +# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11) +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0 +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1 +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2 +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3 +# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13) +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6 +# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18) +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0 +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1 +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2 +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3 # define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) # define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0 # define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index b937c49054d9..e00039e59a75 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -899,6 +899,10 @@ #define DB_HTILE_DATA_BASE 0x28014 #define DB_Z_INFO 0x28040 # define Z_ARRAY_MODE(x) ((x) << 4) +# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8) +# define DB_NUM_BANKS(x) (((x) & 0x3) << 12) +# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16) +# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20) #define DB_STENCIL_INFO 0x28044 #define DB_Z_READ_BASE 0x28048 #define DB_STENCIL_READ_BASE 0x2804c @@ -951,6 +955,29 @@ # define CB_SF_EXPORT_FULL 0 # define CB_SF_EXPORT_NORM 1 #define CB_COLOR0_ATTRIB 0x28c74 +# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5) +# define ADDR_SURF_TILE_SPLIT_64B 0 +# 
define ADDR_SURF_TILE_SPLIT_128B 1 +# define ADDR_SURF_TILE_SPLIT_256B 2 +# define ADDR_SURF_TILE_SPLIT_512B 3 +# define ADDR_SURF_TILE_SPLIT_1KB 4 +# define ADDR_SURF_TILE_SPLIT_2KB 5 +# define ADDR_SURF_TILE_SPLIT_4KB 6 +# define CB_NUM_BANKS(x) (((x) & 0x3) << 10) +# define ADDR_SURF_2_BANK 0 +# define ADDR_SURF_4_BANK 1 +# define ADDR_SURF_8_BANK 2 +# define ADDR_SURF_16_BANK 3 +# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13) +# define ADDR_SURF_BANK_WIDTH_1 0 +# define ADDR_SURF_BANK_WIDTH_2 1 +# define ADDR_SURF_BANK_WIDTH_4 2 +# define ADDR_SURF_BANK_WIDTH_8 3 +# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16) +# define ADDR_SURF_BANK_HEIGHT_1 0 +# define ADDR_SURF_BANK_HEIGHT_2 1 +# define ADDR_SURF_BANK_HEIGHT_4 2 +# define ADDR_SURF_BANK_HEIGHT_8 3 #define CB_COLOR0_DIM 0x28c78 /* only CB0-7 blocks have these regs */ #define CB_COLOR0_CMASK 0x28c7c @@ -1137,7 +1164,11 @@ # define SQ_SEL_1 5 #define SQ_TEX_RESOURCE_WORD5_0 0x30014 #define SQ_TEX_RESOURCE_WORD6_0 0x30018 +# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29) #define SQ_TEX_RESOURCE_WORD7_0 0x3001c +# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8) +# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10) +# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16) #define SQ_VTX_CONSTANT_WORD0_0 0x30000 #define SQ_VTX_CONSTANT_WORD1_0 0x30004 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index ad158ea49901..bfc08f6320f8 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; + int i; /* Lock the graphics update lock */ /* update the scanout addresses */ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); /* Wait for update_pending to go high. */ - while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 3f6636bb2d7f..3516a6081dcf 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle) /* Fail only if calling the method fails and ATIF is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status)); + DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", + acpi_format_exception(status)); kfree(buffer.pointer); return 1; } @@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev) acpi_handle handle; int ret; - /* No need to proceed if we're sure that ATIF is not supported */ - if (!ASIC_IS_AVIVO(rdev) || !rdev->bios) - return 0; - /* Get the device handle */ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + /* No need to proceed if we're sure that ATIF is not supported */ + if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) + return 0; + /* Call the ATIF method */ ret = radeon_atif_call(handle); if (ret) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 06e413e6a920..4b27efa4405b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder) switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_TRAVIS: case ENCODER_OBJECT_ID_NUTMEG: - return true; + return radeon_encoder->encoder_id; default: - return false; + return ENCODER_OBJECT_ID_NONE; } } - - return false; + return ENCODER_OBJECT_ID_NONE; } void radeon_panel_mode_fixup(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 481b99e89f65..b1053d640423 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + int i; /* Lock the graphics update lock */ tmp |= AVIVO_D1GRPH_UPDATE_LOCK; @@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) (u32)crtc_base); /* Wait for update_pending to go high. */ - while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index a983f410ab89..23ae1c60ab3d 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + int i; /* Lock the graphics update lock */ tmp |= AVIVO_D1GRPH_UPDATE_LOCK; @@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) (u32)crtc_base); /* Wait for update_pending to go high. */ - while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 3f6343502d1f..5ff561d4e0b4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -140,7 +140,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, goto out_clips; } - clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); + clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); if (clips == NULL) { DRM_ERROR("Failed to allocate clip rect list.\n"); ret = -ENOMEM; @@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, goto out_clips; } - clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); + clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); if (clips == NULL) { DRM_ERROR("Failed to allocate clip rect list.\n"); ret = -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 880e285d7578..37d40545ed77 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1809,7 +1809,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); - rects = kzalloc(rects_size, GFP_KERNEL); + rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), + GFP_KERNEL); if (unlikely(!rects)) { ret = -ENOMEM; goto out_unlock; @@ -1824,10 +1825,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, } for (i = 0; i < arg->num_outputs; ++i) { - if (rects->x < 0 || - rects->y < 0 || - rects->x + rects->w > mode_config->max_width || - rects->y + rects->h > mode_config->max_height) { + if (rects[i].x < 0 || + rects[i].y < 0 || + rects[i].x + rects[i].w > mode_config->max_width || + rects[i].y + rects[i].h > mode_config->max_height) { DRM_ERROR("Invalid GUI layout.\n"); ret = -EINVAL; goto out_free; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 848a56c0279c..af353842f75f 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) }, { 
HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 06ce996b8b65..4a441a6f9967 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -266,7 +266,7 @@ #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 #define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc -#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001 +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003 #define USB_VENDOR_ID_GLAB 0x06c2 #define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index c0c7820d4c46..a004c3945c67 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3524,7 +3524,7 @@ found: return 0; } -int dmar_parse_rmrr_atsr_dev(void) +int __init dmar_parse_rmrr_atsr_dev(void) { struct dmar_rmrr_unit *rmrr, *rmrr_n; struct dmar_atsr_unit *atsr, *atsr_n; diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c index 07c9f189f314..6777ca049471 100644 --- a/drivers/iommu/intr_remapping.c +++ b/drivers/iommu/intr_remapping.c @@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void) return ir_supported; } -int ir_dev_scope_init(void) +int __init ir_dev_scope_init(void) { if (!intr_remapping_enabled) return 0; diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index 33ec9e467772..9021182c4b76 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c @@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg) case IIOCDOCFINT: if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid)) return (-EINVAL); /* invalid driver */ + if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) == + sizeof(dioctl.cf_ctrl.msn)) + return -EINVAL; + if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) == + sizeof(dioctl.cf_ctrl.fwd_nr)) + return -EINVAL; if ((i = cf_command(dioctl.cf_ctrl.drvid, (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 
0 : 2, dioctl.cf_ctrl.cfproc, diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 1f73d7f7e024..2339d7396b9e 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) char *c, *e; + if (strnlen(cfg->drvid, sizeof(cfg->drvid)) == + sizeof(cfg->drvid)) + return -EINVAL; drvidx = -1; chidx = -1; strcpy(drvid, cfg->drvid); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 7878712721bf..b6907118283a 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap) */ int i; + spin_lock_irq(&bitmap->lock); for (i = 0; i < bitmap->file_pages; i++) set_page_attr(bitmap, bitmap->filemap[i], BITMAP_PAGE_NEEDWRITE); bitmap->allclean = 0; + spin_unlock_irq(&bitmap->lock); } static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) @@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) for (chunk = s; chunk <= e; chunk++) { sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap); bitmap_set_memory_bits(bitmap, sec, 1); + spin_lock_irq(&bitmap->lock); bitmap_file_set_bit(bitmap, sec); + spin_unlock_irq(&bitmap->lock); if (sec < bitmap->mddev->recovery_cp) /* We are asserting that the array is dirty, * so move the recovery_cp address back so diff --git a/drivers/md/md.c b/drivers/md/md.c index 84acfe7d10e4..ee981737edfc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev) mddev->ctime == 0 && !mddev->hold_active) { /* Array is not configured at all, and not held active, * so destroy it */ - list_del(&mddev->all_mddevs); + list_del_init(&mddev->all_mddevs); bs = mddev->bio_set; mddev->bio_set = NULL; if (mddev->gendisk) { @@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page) sep = ","; } if (test_bit(Blocked, &rdev->flags) || - rdev->badblocks.unacked_exist) { + (rdev->badblocks.unacked_exist + && !test_bit(Faulty, &rdev->flags))) { len += sprintf(page+len, "%sblocked", sep); sep = ","; } @@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) if (err) return err; else { + if (mddev->hold_active == UNTIL_IOCTL) + mddev->hold_active = 0; sysfs_notify_dirent_safe(mddev->sysfs_state); return len; } @@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) if (!entry->show) return -EIO; + spin_lock(&all_mddevs_lock); + if (list_empty(&mddev->all_mddevs)) { + spin_unlock(&all_mddevs_lock); + return -EBUSY; + } + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + rv = mddev_lock(mddev); if (!rv) { rv = entry->show(mddev, page); mddev_unlock(mddev); } + mddev_put(mddev); return rv; } @@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, return -EIO; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + spin_lock(&all_mddevs_lock); + if (list_empty(&mddev->all_mddevs)) { + spin_unlock(&all_mddevs_lock); + return -EBUSY; + } + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); rv = mddev_lock(mddev); - if (mddev->hold_active == UNTIL_IOCTL) - mddev->hold_active = 0; if (!rv) { rv = entry->store(mddev, page, length); mddev_unlock(mddev); } + mddev_put(mddev); return rv; } @@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, s + rdev->data_offset, sectors, acknowledged); if (rv) { /* Make sure they get written out promptly */ + 
sysfs_notify_dirent_safe(rdev->sysfs_state); set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); md_wakeup_thread(rdev->mddev->thread); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 297e26092178..31670f8d6b65 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) if (dev->written) s->written++; rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && test_bit(Faulty, &rdev->flags)) + rdev = NULL; if (rdev) { is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, &first_bad, &bad_sectors); @@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) } } else if (test_bit(In_sync, &rdev->flags)) set_bit(R5_Insync, &dev->flags); - else if (!test_bit(Faulty, &rdev->flags)) { + else { /* in sync if before recovery_offset */ if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) set_bit(R5_Insync, &dev->flags); } - if (test_bit(R5_WriteError, &dev->flags)) { + if (rdev && test_bit(R5_WriteError, &dev->flags)) { clear_bit(R5_Insync, &dev->flags); if (!test_bit(Faulty, &rdev->flags)) { s->handle_bad_blocks = 1; @@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) } else clear_bit(R5_WriteError, &dev->flags); } - if (test_bit(R5_MadeGood, &dev->flags)) { + if (rdev && test_bit(R5_MadeGood, &dev->flags)) { if (!test_bit(Faulty, &rdev->flags)) { s->handle_bad_blocks = 1; atomic_inc(&rdev->nr_pending); diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig index a73d9dc80ff6..84fb6349a59a 100644 --- a/drivers/net/arcnet/Kconfig +++ b/drivers/net/arcnet/Kconfig @@ -4,7 +4,7 @@ menuconfig ARCNET depends on NETDEVICES && (ISA || PCI || PCMCIA) - bool "ARCnet support" + tristate "ARCnet support" ---help--- If you have a network card of this type, say Y and check out the (arguably) beautiful poetry in diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b0c577256487..7f8756825b8a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2553,30 +2553,6 @@ re_arm: } } -static __be32 bond_glean_dev_ip(struct net_device *dev) -{ - struct in_device *idev; - struct in_ifaddr *ifa; - __be32 addr = 0; - - if (!dev) - return 0; - - rcu_read_lock(); - idev = __in_dev_get_rcu(dev); - if (!idev) - goto out; - - ifa = idev->ifa_list; - if (!ifa) - goto out; - - addr = ifa->ifa_local; -out: - rcu_read_unlock(); - return addr; -} - static int bond_has_this_ip(struct bonding *bond, __be32 ip) { struct vlan_entry *vlan; @@ -3322,6 +3298,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, struct bonding *bond; struct vlan_entry *vlan; + /* we only care about primary address */ + if(ifa->ifa_flags & IFA_F_SECONDARY) + return NOTIFY_DONE; + list_for_each_entry(bond, &bn->dev_list, bond_list) { if (bond->dev == event_dev) { switch (event) { @@ -3329,7 +3309,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, bond->master_ip = ifa->ifa_local; return NOTIFY_OK; case NETDEV_DOWN: - bond->master_ip = bond_glean_dev_ip(bond->dev); + bond->master_ip = 0; return NOTIFY_OK; default: return NOTIFY_DONE; @@ -3345,8 +3325,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, vlan->vlan_ip = ifa->ifa_local; return NOTIFY_OK; case NETDEV_DOWN: - vlan->vlan_ip = - bond_glean_dev_ip(vlan_dev); + vlan->vlan_ip = 0; return NOTIFY_OK; default: return NOTIFY_DONE; 
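The bond_main.c hunk above drops the bond_glean_dev_ip() fallback entirely: the inetaddr notifier now ignores secondary addresses up front and simply clears the cached address on NETDEV_DOWN instead of guessing a replacement. A minimal sketch of that notifier pattern follows; it is not the bonding driver's code, and the names tracked_primary_ip and example_inetaddr_event are illustrative placeholders only.

#include <linux/types.h>
#include <linux/if_addr.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static __be32 tracked_primary_ip;

static int example_inetaddr_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;

	/* Secondary addresses would otherwise overwrite the cached
	 * primary on every NETDEV_UP, so bail out early. */
	if (ifa->ifa_flags & IFA_F_SECONDARY)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		tracked_primary_ip = ifa->ifa_local;
		return NOTIFY_OK;
	case NETDEV_DOWN:
		/* Do not try to glean another address; just forget it. */
		tracked_primary_ip = 0;
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_inetaddr_nb = {
	.notifier_call = example_inetaddr_event,
};

Hooking this up would use register_inetaddr_notifier(&example_inetaddr_nb) at init and unregister_inetaddr_notifier() on teardown, the same mechanism the bonding driver relies on.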
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 905bce0b3a43..2c7f5036f570 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -20,7 +20,6 @@ */ #include <linux/kernel.h> -#include <linux/version.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 4cf835dbc122..3fb66d09ece5 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp) skb->len, DMA_TO_DEVICE); rp->skb = NULL; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); } bp->tx_cons = cons; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index bce203fa4b9e..882f48f0a03c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, return 0; } + +static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy, + struct link_params *params, u8 mode) +{ + struct bnx2x *bp = params->bp; + u16 temp; + + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL1); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= 0xff00; + + DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode); + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + temp |= 0x00ee; + break; + case LED_MODE_OPER: + temp |= 0x0001; + break; + case LED_MODE_ON: + temp |= 0x00ff; + break; + default: + break; + } + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + return; +} + + static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, struct link_params *params) { @@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = { .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, + .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, .phy_specific_func = (phy_specific_func_t)NULL }; /*****************************************************************/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index fc7bd0f23c0b..e58073ef33b4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -6990,6 +6990,7 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_REG_INTR_MASK 0x1b #define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) #define MDIO_REG_GPHY_SHADOW 0x1c +#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10) #define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10) #define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15) #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10) diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 438f4580bf66..2a22f5256353 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -613,7 +613,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) if (!dm->wake_state) irq_set_irq_wake(dm->irq_wake, 1); - else if (dm->wake_state & !opts) + else if (dm->wake_state && !opts) irq_set_irq_wake(dm->irq_wake, 0); } diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 
c520cfd3b298..5272f9d4dda9 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -24,6 +24,7 @@ config FEC bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ ARCH_MXC || ARCH_MXS) + default ARCH_MXC || ARCH_MXS if ARM select PHYLIB ---help--- Say Y here if you want to use the built-in 10/100 Fast ethernet diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h index 410d6a1984ed..6650068c996c 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea.h +++ b/drivers/net/ethernet/ibm/ehea/ehea.h @@ -61,9 +61,9 @@ #ifdef EHEA_SMALL_QUEUES #define EHEA_MAX_CQE_COUNT 1023 #define EHEA_DEF_ENTRIES_SQ 1023 -#define EHEA_DEF_ENTRIES_RQ1 4095 +#define EHEA_DEF_ENTRIES_RQ1 1023 #define EHEA_DEF_ENTRIES_RQ2 1023 -#define EHEA_DEF_ENTRIES_RQ3 1023 +#define EHEA_DEF_ENTRIES_RQ3 511 #else #define EHEA_MAX_CQE_COUNT 4080 #define EHEA_DEF_ENTRIES_SQ 4080 diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 37b70f7052b6..bfeccbfde236 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work) out_herr: free_page((unsigned long)cb2); resched: - schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000)); + schedule_delayed_work(&port->stats_work, + round_jiffies_relative(msecs_to_jiffies(1000))); } static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) @@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev) } mutex_unlock(&port->port_lock); - schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000)); + schedule_delayed_work(&port->stats_work, + round_jiffies_relative(msecs_to_jiffies(1000))); return ret; } diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c index 4326681df382..acc31af6594a 100644 --- a/drivers/net/ethernet/ibm/iseries_veth.c +++ b/drivers/net/ethernet/ibm/iseries_veth.c @@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx, /* FIXME: do we need this? 
*/ memset(local_list, 0, sizeof(local_list)); - memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG)); + memset(remote_list, 0, sizeof(remote_list)); /* a 0 address marks the end of the valid entries */ if (senddata->addr[startchunk] == 0) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 7becff1f387d..76b84573566b 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme) } static int +jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg) +{ + u32 phy_addr; + + phy_addr = JM_PHY_SPEC_REG_READ | specreg; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, + phy_addr); + return jme_mdio_read(jme->dev, jme->mii_if.phy_id, + JM_PHY_SPEC_DATA_REG); +} + +static void +jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data) +{ + u32 phy_addr; + + phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG, + phy_data); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, + phy_addr); +} + +static int +jme_phy_calibration(struct jme_adapter *jme) +{ + u32 ctrl1000, phy_data; + + jme_phy_off(jme); + jme_phy_on(jme); + /* Enabel PHY test mode 1 */ + ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); + ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; + ctrl1000 |= PHY_GAD_TEST_MODE_1; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); + + phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); + phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0; + phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH | + JM_PHY_EXT_COMM_2_CALI_ENABLE; + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); + msleep(20); + phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); + phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE | + JM_PHY_EXT_COMM_2_CALI_MODE_0 | + JM_PHY_EXT_COMM_2_CALI_LATCH); + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); + + /* Disable PHY test mode */ + ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); + ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); + return 0; +} + +static int +jme_phy_setEA(struct jme_adapter *jme) +{ + u32 phy_comm0 = 0, phy_comm1 = 0; + u8 nic_ctrl; + + pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl); + if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE) + return 0; + + switch (jme->pdev->device) { + case PCI_DEVICE_ID_JMICRON_JMC250: + if (((jme->chip_main_rev == 5) && + ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || + (jme->chip_sub_rev == 3))) || + (jme->chip_main_rev >= 6)) { + phy_comm0 = 0x008A; + phy_comm1 = 0x4109; + } + if ((jme->chip_main_rev == 3) && + ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) + phy_comm0 = 0xE088; + break; + case PCI_DEVICE_ID_JMICRON_JMC260: + if (((jme->chip_main_rev == 5) && + ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || + (jme->chip_sub_rev == 3))) || + (jme->chip_main_rev >= 6)) { + phy_comm0 = 0x008A; + phy_comm1 = 0x4109; + } + if ((jme->chip_main_rev == 3) && + ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) + phy_comm0 = 0xE088; + if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0)) + phy_comm0 = 0x608A; + if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2)) + phy_comm0 = 0x408A; + break; + default: + return -ENODEV; + } + if (phy_comm0) + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0); + if (phy_comm1) + jme_phy_specreg_write(jme, 
JM_PHY_EXT_COMM_1_REG, phy_comm1); + + return 0; +} + +static int jme_open(struct net_device *netdev) { struct jme_adapter *jme = netdev_priv(netdev); @@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev) jme_set_settings(netdev, &jme->old_ecmd); else jme_reset_phy_processor(jme); - + jme_phy_calibration(jme); + jme_phy_setEA(jme); jme_reset_link(jme); return 0; @@ -3184,7 +3291,8 @@ jme_resume(struct device *dev) jme_set_settings(netdev, &jme->old_ecmd); else jme_reset_phy_processor(jme); - + jme_phy_calibration(jme); + jme_phy_setEA(jme); jme_start_irq(jme); netif_device_attach(netdev); @@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, jme_pci_tbl); - diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 02ea27c1dcb5..4304072bd3c5 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -760,6 +760,25 @@ enum jme_rxmcs_bits { RXMCS_CHECKSUM, }; +/* Extern PHY common register 2 */ + +#define PHY_GAD_TEST_MODE_1 0x00002000 +#define PHY_GAD_TEST_MODE_MSK 0x0000E000 +#define JM_PHY_SPEC_REG_READ 0x00004000 +#define JM_PHY_SPEC_REG_WRITE 0x00008000 +#define PHY_CALIBRATION_DELAY 20 +#define JM_PHY_SPEC_ADDR_REG 0x1E +#define JM_PHY_SPEC_DATA_REG 0x1F + +#define JM_PHY_EXT_COMM_0_REG 0x30 +#define JM_PHY_EXT_COMM_1_REG 0x31 +#define JM_PHY_EXT_COMM_2_REG 0x32 +#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01 +#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02 +#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10 +#define PCI_PRIV_SHARE_NICCTRL 0xF5 +#define JME_FLAG_PHYEA_ENABLE 0x2 + /* * Wakeup Frame setup interface registers */ diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile index 05db5434bafc..90497ffb1ac3 100644 --- a/drivers/net/ethernet/pasemi/Makefile +++ b/drivers/net/ethernet/pasemi/Makefile @@ -2,4 +2,5 @@ # Makefile for the A Semi network device drivers. # -obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o +obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o +pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 8731f79c9efc..b8478aab050e 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -58,10 +58,8 @@ #define TX_DESC_PER_IOCB 8 -/* The maximum number of frags we handle is based - * on PAGE_SIZE... 
- */ -#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */ + +#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0 #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) #else /* all other page sizes */ #define TX_DESC_PER_OAL 0 @@ -1353,7 +1351,7 @@ struct tx_ring_desc { struct ob_mac_iocb_req *queue_entry; u32 index; struct oal oal; - struct map_list map[MAX_SKB_FRAGS + 1]; + struct map_list map[MAX_SKB_FRAGS + 2]; int map_cnt; struct tx_ring_desc *next; }; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 6f06aa10f0d7..67bf07819992 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) return value; } -static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { - RTL_W16(IntrMask, 0x0000); + void __iomem *ioaddr = tp->mmio_addr; - RTL_W16(IntrStatus, 0xffff); + RTL_W16(IntrMask, 0x0000); + RTL_W16(IntrStatus, tp->intr_event); + RTL_R8(ChipCmd); } static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) @@ -3933,8 +3935,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp) break; udelay(100); } - - rtl8169_init_ring_indexes(tp); } static int __devinit @@ -4339,7 +4339,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; /* Disable interrupts */ - rtl8169_irq_mask_and_ack(ioaddr); + rtl8169_irq_mask_and_ack(tp); rtl_rx_close(tp); @@ -4885,8 +4885,7 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W16(IntrMitigate, 0x5151); /* Work around for RxFIFO overflow. */ - if (tp->mac_version == RTL_GIGA_MAC_VER_11 || - tp->mac_version == RTL_GIGA_MAC_VER_22) { + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { tp->intr_event |= RxFIFOOver | PCSTimeout; tp->intr_event &= ~RxOverflow; } @@ -5076,6 +5075,11 @@ static void rtl_hw_start_8101(struct net_device *dev) void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; + if (tp->mac_version >= RTL_GIGA_MAC_VER_30) { + tp->intr_event &= ~RxFIFOOver; + tp->napi_event &= ~RxFIFOOver; + } + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || tp->mac_version == RTL_GIGA_MAC_VER_16) { int cap = pci_pcie_cap(pdev); @@ -5342,7 +5346,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) /* Wait for any pending NAPI task to complete */ napi_disable(&tp->napi); - rtl8169_irq_mask_and_ack(ioaddr); + rtl8169_irq_mask_and_ack(tp); tp->intr_mask = 0xffff; RTL_W16(IntrMask, tp->intr_event); @@ -5389,14 +5393,16 @@ static void rtl8169_reset_task(struct work_struct *work) if (!netif_running(dev)) goto out_unlock; + rtl8169_hw_reset(tp); + rtl8169_wait_for_quiescence(dev); for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); - rtl8169_hw_reset(tp); rtl_hw_start(dev); netif_wake_queue(dev); rtl8169_check_link_status(dev, tp, tp->mmio_addr); @@ -5407,11 +5413,6 @@ out_unlock: static void rtl8169_tx_timeout(struct net_device *dev) { - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_hw_reset(tp); - - /* Let's wait a bit while any (async) irq lands on */ rtl8169_schedule_work(dev, rtl8169_reset_task); } @@ -5804,6 +5805,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) */ status = RTL_R16(IntrStatus); while (status && status != 0xffff) { + status &= tp->intr_event; + if (!status) + break; + handled 
= 1; /* Handle all of the error cases first. These will reset @@ -5818,27 +5823,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) switch (tp->mac_version) { /* Work around for rx fifo overflow */ case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_26: netif_stop_queue(dev); rtl8169_tx_timeout(dev); goto done; - /* Testers needed. */ - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - /* Experimental science. Pktgen proof. */ - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_25: - if (status == RxFIFOOver) - goto done; - break; default: break; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8ea770a89f25..72cd190b9c1a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -781,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; - /* Do not manage MMC IRQ (FIXME) */ + /* Mask MMC irq, counters are managed in SW and registers + * are cleared on each READ eventually. */ dwmac_mmc_intr_all_mask(priv->ioaddr); - dwmac_mmc_ctrl(priv->ioaddr, mode); - memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); + + if (priv->dma_cap.rmon) { + dwmac_mmc_ctrl(priv->ioaddr, mode); + memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); + } else + pr_info(" No MAC Management Counters available"); } static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) @@ -1012,8 +1017,7 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; - if (priv->dma_cap.rmon) - stmmac_mmc_setup(priv); + stmmac_mmc_setup(priv); /* Start the ball rolling... */ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 10826d8a2a2d..1187a1169eb2 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) goto done; /* Re-enable the ingress interrupt. */ - enable_percpu_irq(priv->intr_id); + enable_percpu_irq(priv->intr_id, 0); /* HACK: Avoid the "rotting packet" problem (see above). */ if (qup->__packet_receive_read != @@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr) info->napi_enabled = true; /* Enable the ingress interrupt. */ - enable_percpu_irq(priv->intr_id); + enable_percpu_irq(priv->intr_id, 0); } @@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags, for (i = 0; i < sh->nr_frags; i++) { skb_frag_t *f = &sh->frags[i]; - unsigned long pfn = page_to_pfn(f->page); + unsigned long pfn = page_to_pfn(skb_frag_page(f)); /* FIXME: Compute "hash_for_home" properly. */ /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ @@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags, /* FIXME: Hmmm. 
*/ if (!hash_default) { void *va = pfn_to_kaddr(pfn) + f->page_offset; - BUG_ON(PageHighMem(f->page)); + BUG_ON(PageHighMem(skb_frag_page(f))); finv_buffer_remote(va, f->size, 0); } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index bb88e12101c7..a70244306c94 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -3,7 +3,7 @@ # menuconfig PHYLIB - bool "PHY Device support and infrastructure" + tristate "PHY Device support and infrastructure" depends on !S390 depends on NETDEVICES help diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index e12b48c2cff6..dd008b0e6417 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = { .chain_noise_scale = 1000, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 128, + .wd_disable = true, }; static struct iwl_ht_params iwl1000_ht_params = { .ht_greenfield_support = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index c511c98a89a8..f55fb2d1af52 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = { .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .no_idle_support = true, + .wd_disable = true, }; static struct iwl_ht_params iwl5000_ht_params = { .ht_greenfield_support = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 58a381c01c89..a7a6def40d05 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) return 0; } +void iwlagn_config_ht40(struct ieee80211_conf *conf, + struct iwl_rxon_context *ctx) +{ + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } +} + int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) { struct iwl_priv *priv = hw->priv; @@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) ctx->ht.enabled = conf_is_ht(conf); if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } + /* if HT40 is used, it should not change + * after associated except channel switch */ + if (iwl_is_associated_ctx(ctx) && + !ctx->ht.is_40mhz) + iwlagn_config_ht40(conf, ctx); } else ctx->ht.is_40mhz = false; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index ed6283623932..4b2aa1da0953 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c @@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv, switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_TKIP: - keyconf->flags |= 
IEEE80211_KEY_FLAG_GENERATE_MMIC; - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - if (sta) addr = sta->addr; else /* station mode case only */ @@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv, seq.tkip.iv32, p1k, CMD_SYNC); break; case WLAN_CIPHER_SUITE_CCMP: - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - /* fall through */ case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: ret = iwlagn_send_sta_key(priv, keyconf, sta_id, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index ccba69b7f8a7..bacc06c95e7a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + /* fall through */ + case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + default: + break; + } + /* * We could program these keys into the hardware as well, but we * don't expect much multicast traffic in IBSS and having keys @@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, /* Configure HT40 channels */ ctx->ht.enabled = conf_is_ht(conf); - if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } - } else + if (ctx->ht.enabled) + iwlagn_config_ht40(conf, ctx); + else ctx->ht.is_40mhz = false; if ((le16_to_cpu(ctx->staging.channel) != ch)) @@ -3499,9 +3498,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])"); module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); -module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO); +module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO); MODULE_PARM_DESC(wd_disable, - "Disable stuck queue watchdog timer (default: 0 [enabled])"); + "Disable stuck queue watchdog timer 0=system default, " + "1=disable, 2=enable (default: 0)"); /* * set bt_coex_active to true, uCode will do kill/defer diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index 5b936ec1a541..3856abaea507 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes); +void iwlagn_config_ht40(struct ieee80211_conf *conf, + struct iwl_rxon_context *ctx); /* uCode */ int iwlagn_rx_calib_result(struct iwl_priv *priv, diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 001fdf140abb..fcf54160e4ed 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv) { unsigned int timeout = priv->cfg->base_params->wd_timeout; - if (timeout && !iwlagn_mod_params.wd_disable) - mod_timer(&priv->watchdog, - jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); - else - 
del_timer(&priv->watchdog); + if (!iwlagn_mod_params.wd_disable) { + /* use system default */ + if (timeout && !priv->cfg->base_params->wd_disable) + mod_timer(&priv->watchdog, + jiffies + + msecs_to_jiffies(IWL_WD_TICK(timeout))); + else + del_timer(&priv->watchdog); + } else { + /* module parameter overwrite default configuration */ + if (timeout && iwlagn_mod_params.wd_disable == 2) + mod_timer(&priv->watchdog, + jiffies + + msecs_to_jiffies(IWL_WD_TICK(timeout))); + else + del_timer(&priv->watchdog); + } } /** diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 137da3380704..f2fc288f3dd3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -113,6 +113,7 @@ struct iwl_lib_ops { * @shadow_reg_enable: HW shadhow register bit * @no_idle_support: do not support idle mode * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up + * wd_disable: disable watchdog timer */ struct iwl_base_params { int eeprom_size; @@ -134,6 +135,7 @@ struct iwl_base_params { const bool shadow_reg_enable; const bool no_idle_support; const bool hd_v2; + const bool wd_disable; }; /* * @advanced_bt_coexist: support advanced bt coexist diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h index 1f7a93c67c45..14eaf37ce3b1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-shared.h +++ b/drivers/net/wireless/iwlwifi/iwl-shared.h @@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params; * @restart_fw: restart firmware, default = 1 * @plcp_check: enable plcp health check, default = true * @ack_check: disable ack health check, default = false - * @wd_disable: enable stuck queue check, default = false + * @wd_disable: enable stuck queue check, default = 0 * @bt_coex_active: enable bt coex, default = true * @led_mode: system default, default = 0 * @no_sleep_autoadjust: disable autoadjust, default = true @@ -141,7 +141,7 @@ struct iwl_mod_params { int restart_fw; bool plcp_check; bool ack_check; - bool wd_disable; + int wd_disable; bool bt_coex_active; int led_mode; bool no_sleep_autoadjust; diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index f18df82eeb92..78d0d6988553 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev) WARN_ON(priv->fw_state != FW_STATE_READY); - cancel_work_sync(&priv->work); - p54spi_power_off(priv); spin_lock_irqsave(&priv->tx_lock, flags); INIT_LIST_HEAD(&priv->tx_pending); @@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev) priv->fw_state = FW_STATE_OFF; mutex_unlock(&priv->mutex); + + cancel_work_sync(&priv->work); } static int __devinit p54spi_probe(struct spi_device *spi) @@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) init_completion(&priv->fw_comp); INIT_LIST_HEAD(&priv->tx_pending); mutex_init(&priv->mutex); + spin_lock_init(&priv->tx_lock); SET_IEEE80211_DEV(hw, &spi->dev); priv->common.open = p54spi_op_start; priv->common.stop = p54spi_op_stop; diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index d97a2caf582b..bc2ba80c47bb 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, dwrq->flags = 0; dwrq->length = 0; } - essid->octets[essid->length] = '\0'; + 
essid->octets[dwrq->length] = '\0'; memcpy(extra, essid->octets, dwrq->length); kfree(essid); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 3f183a15186e..1ba079dffb11 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i) /* Apparently the data is read from end to start */ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, ®); /* The returned value is in CPU order, but eeprom is le */ - rt2x00dev->eeprom[i] = cpu_to_le32(reg); + *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg); rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, ®); *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, ®); diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index db5262844543..55c8e50f45fd 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) if (mac->link_state != MAC80211_LINKED) return; - spin_lock(&rtlpriv->locks.lps_lock); + spin_lock_irq(&rtlpriv->locks.lps_lock); /* Idle for a while if we connect to AP a while ago. */ if (mac->cnt_after_linked >= 2) { @@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) } } - spin_unlock(&rtlpriv->locks.lps_lock); + spin_unlock_irq(&rtlpriv->locks.lps_lock); } /*Leave the leisure power save mode.*/ @@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + unsigned long flags; - spin_lock(&rtlpriv->locks.lps_lock); + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags); if (ppsc->fwctrl_lps) { if (ppsc->dot11_psmode != EACTIVE) { @@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw) rtl_lps_set_psmode(hw, EACTIVE); } } - spin_unlock(&rtlpriv->locks.lps_lock); + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags); } /* For sw LPS*/ @@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } - spin_lock(&rtlpriv->locks.lps_lock); + spin_lock_irq(&rtlpriv->locks.lps_lock); rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); - spin_unlock(&rtlpriv->locks.lps_lock); + spin_unlock_irq(&rtlpriv->locks.lps_lock); } void rtl_swlps_rfon_wq_callback(void *data) @@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) if (rtlpriv->link_info.busytraffic) return; - spin_lock(&rtlpriv->locks.lps_lock); + spin_lock_irq(&rtlpriv->locks.lps_lock); rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); - spin_unlock(&rtlpriv->locks.lps_lock); + spin_unlock_irq(&rtlpriv->locks.lps_lock); if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 0cb594c86090..15e332d08c8d 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, pending_idx = *((u16 *)skb->data); xen_netbk_idx_release(netbk, pending_idx); for (j = start; j < i; j++) { - pending_idx = frag_get_pending_idx(&shinfo->frags[i]); + pending_idx = frag_get_pending_idx(&shinfo->frags[j]); xen_netbk_idx_release(netbk, pending_idx); } @@ -1668,7 +1668,7 @@ static int __init netback_init(void) "netback/%u", 
group); if (IS_ERR(netbk->task)) { - printk(KERN_ALERT "kthread_run() fails at netback\n"); + printk(KERN_ALERT "kthread_create() fails at netback\n"); del_timer(&netbk->net_timer); rc = PTR_ERR(netbk->task); goto failed_init; diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 19c0115092dd..0f0cfa3bca30 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -26,11 +26,6 @@ #include <linux/string.h> #include <linux/slab.h> -/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */ -#ifndef NO_IRQ -#define NO_IRQ 0 -#endif - /** * irq_of_parse_and_map - Parse and map an interrupt into linux virq space * @device: Device node of the device whose interrupt is to be mapped @@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index) struct of_irq oirq; if (of_irq_map_one(dev, index, &oirq)) - return NO_IRQ; + return 0; return irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); @@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) /* Only dereference the resource if both the * resource and the irq are valid. */ - if (r && irq != NO_IRQ) { + if (r && irq) { r->start = r->end = irq; r->flags = IORESOURCE_IRQ; r->name = dev->full_name; @@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev) { int nr = 0; - while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ) + while (of_irq_to_resource(dev, nr, NULL)) nr++; return nr; @@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int i; for (i = 0; i < nr_irqs; i++, res++) - if (of_irq_to_resource(dev, i, res) == NO_IRQ) + if (!of_irq_to_resource(dev, i, res)) break; return i; diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index dccd8636095c..f8c752e408a6 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c @@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val) return err; } +static int timer_mode; + static int __init oprofile_init(void) { int err; + /* always init architecture to setup backtrace support */ err = oprofile_arch_init(&oprofile_ops); - if (err < 0 || timer) { - printk(KERN_INFO "oprofile: using timer interrupt.\n"); + + timer_mode = err || timer; /* fall back to timer mode on errors */ + if (timer_mode) { + if (!err) + oprofile_arch_exit(); err = oprofile_timer_init(&oprofile_ops); if (err) return err; } - return oprofilefs_register(); + + err = oprofilefs_register(); + if (!err) + return 0; + + /* failed */ + if (timer_mode) + oprofile_timer_exit(); + else + oprofile_arch_exit(); + + return err; } static void __exit oprofile_exit(void) { - oprofile_timer_exit(); oprofilefs_unregister(); - oprofile_arch_exit(); + if (timer_mode) + oprofile_timer_exit(); + else + oprofile_arch_exit(); } diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c index 3ef44624f510..878fba126582 100644 --- a/drivers/oprofile/timer_int.c +++ b/drivers/oprofile/timer_int.c @@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops) ops->start = oprofile_hrtimer_start; ops->stop = oprofile_hrtimer_stop; ops->cpu_type = "timer"; + printk(KERN_INFO "oprofile: using timer interrupt.\n"); return 0; } diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 13ef8c37471d..dcdc1f4a4624 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -121,6 +121,7 @@ struct toshiba_acpi_dev { int illumination_supported:1; int video_supported:1; int 
fan_supported:1; + int system_event_supported:1; struct mutex mutex; }; @@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v) u32 hci_result; u32 value; - if (!dev->key_event_valid) { + if (!dev->key_event_valid && dev->system_event_supported) { hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); if (hci_result == HCI_SUCCESS) { dev->key_event_valid = 1; @@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev) /* enable event fifo */ hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); + if (hci_result == HCI_SUCCESS) + dev->system_event_supported = 1; props.type = BACKLIGHT_PLATFORM; props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; @@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) { struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); u32 hci_result, value; + int retries = 3; - if (event != 0x80) + if (!dev->system_event_supported || event != 0x80) return; + do { hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); - if (hci_result == HCI_SUCCESS) { + switch (hci_result) { + case HCI_SUCCESS: if (value == 0x100) continue; /* act on key press; ignore key release */ @@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) pr_info("Unknown key %x\n", value); } - } else if (hci_result == HCI_NOT_SUPPORTED) { + break; + case HCI_NOT_SUPPORTED: /* This is a workaround for an unresolved issue on * some machines where system events sporadically * become disabled. */ hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); pr_notice("Re-enabled hotkeys\n"); + /* fall through */ + default: + retries--; + break; } - } while (hci_result != HCI_EMPTY); + } while (retries && hci_result != HCI_EMPTY); } diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c index cffcb7c00b00..01fa671ec97f 100644 --- a/drivers/power/intel_mid_battery.c +++ b/drivers/power/intel_mid_battery.c @@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages."); #define PMIC_BATT_CHR_SBATDET_MASK (1 << 5) #define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6) #define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7) -#define PMIC_BATT_CHR_EXCPT_MASK 0xC6 +#define PMIC_BATT_CHR_EXCPT_MASK 0x86 + #define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31) #define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF @@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi) pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT); batt_exception = 1; - } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) { - pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; - pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; - pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT); - batt_exception = 1; } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) { pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT; pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; @@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi) batt_exception = 1; } else { pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD; + if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) { + /* PMIC will change charging current automatically */ + pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT); + } } } diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index cf3f9997546d..10451a15e828 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm) static int ptp_clock_getres(struct posix_clock *pc, struct 
timespec *tp) { - return 1; /* always round timer functions to one nanosecond */ + tp->tv_sec = 0; + tp->tv_nsec = 1; + return 0; } static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 5225930a10cd..691b1ab1a3d0 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv) INIT_WORK(&priv->idb_work, tsi721_db_dpc); /* Allocate buffer for inbound doorbells queue */ - priv->idb_base = dma_alloc_coherent(&priv->pdev->dev, + priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, &priv->idb_dma, GFP_KERNEL); if (!priv->idb_base) return -ENOMEM; - memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE); - dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n", priv->idb_base, (unsigned long long)priv->idb_dma); @@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) */ /* Allocate space for DMA descriptors */ - bd_ptr = dma_alloc_coherent(&priv->pdev->dev, + bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, bd_num * sizeof(struct tsi721_dma_desc), &bd_phys, GFP_KERNEL); if (!bd_ptr) @@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) priv->bdma[chnum].bd_phys = bd_phys; priv->bdma[chnum].bd_base = bd_ptr; - memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc)); - dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", bd_ptr, (unsigned long long)bd_phys); @@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? bd_num : TSI721_DMA_MINSTSSZ; sts_size = roundup_pow_of_two(sts_size); - sts_ptr = dma_alloc_coherent(&priv->pdev->dev, + sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, sts_size * sizeof(struct tsi721_dma_sts), &sts_phys, GFP_KERNEL); if (!sts_ptr) { @@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) priv->bdma[chnum].sts_base = sts_ptr; priv->bdma[chnum].sts_size = sts_size; - memset(sts_ptr, 0, sts_size); - dev_dbg(&priv->pdev->dev, "desc status FIFO @ %p (phys = %llx) size=0x%x\n", sts_ptr, (unsigned long long)sts_phys, sts_size); @@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, /* Outbound message descriptor status FIFO allocation */ priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); - priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, + priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); @@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, goto out_desc; } - memset(priv->omsg_ring[mbox].sts_base, 0, - entries * sizeof(struct tsi721_dma_sts)); - /* * Configure Outbound Messaging Engine */ @@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv) INIT_LIST_HEAD(&mport->dbells); rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); - rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0); - rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); + rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3); + rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3); strcpy(mport->name, "Tsi721 mport"); 
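The tsi721 hunks above replace dma_alloc_coherent() followed by memset() with dma_zalloc_coherent(), which hands back already-zeroed coherent memory. A minimal sketch of that pattern, assuming a generic ring buffer (the helper name and its caller are illustrative, not from the driver):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Allocate a zeroed, DMA-coherent buffer; replaces the older
 * dma_alloc_coherent() + memset(ptr, 0, size) sequence.
 */
static void *alloc_zeroed_ring(struct device *dev, size_t size,
			       dma_addr_t *phys)
{
	return dma_zalloc_coherent(dev, size, phys, GFP_KERNEL);
}
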
/* Hook up interrupt handler */ @@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tsi721_device *priv; - int i; + int i, cap; int err; u32 regval; @@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); } - /* Clear "no snoop" and "relaxed ordering" bits. */ - pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, ®val); - regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN); - pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval); + cap = pci_pcie_cap(pdev); + BUG_ON(cap == 0); + + /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ + pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, ®val); + regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | + PCI_EXP_DEVCTL_NOSNOOP_EN); + regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT; + pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval); + + /* Adjust PCIe completion timeout. */ + pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, ®val); + regval &= ~(0x0f); + pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2); /* * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index 58be4deb1402..822e54c394d5 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h @@ -72,6 +72,8 @@ #define TSI721_MSIXPBA_OFFSET 0x2a000 #define TSI721_PCIECFG_EPCTL 0x400 +#define MAX_READ_REQUEST_SZ_SHIFT 12 + /* * Event Management Registers */ diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index e8326f26fa2f..dc4c2748bbc3 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg) */ delta = timespec_sub(old_system, old_rtc); delta_delta = timespec_sub(delta, old_delta); - if (abs(delta_delta.tv_sec) >= 2) { + if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) { /* * if delta_delta is too large, assume time correction * has occured and set old_delta to the current delta. 
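The tsi721_probe() change above stops hardcoding the PCI Express capability at offset 0x40 and looks it up with pci_pcie_cap() before adjusting the Device Control register. A small sketch of that lookup, assuming a valid pci_dev (the helper name is illustrative):

#include <linux/pci.h>

/* Clear relaxed ordering and no-snoop in PCI_EXP_DEVCTL, locating the
 * PCIe capability at runtime instead of assuming a fixed offset.
 */
static void pcie_clear_relax_nosnoop(struct pci_dev *pdev)
{
	int cap = pci_pcie_cap(pdev);	/* 0 if the device is not PCIe */
	u32 devctl;

	if (!cap)
		return;

	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &devctl);
	devctl &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
	pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, devctl);
}
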
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev) rtc_tm_to_time(&tm, &new_rtc.tv_sec); new_rtc.tv_nsec = 0; - if (new_rtc.tv_sec <= old_rtc.tv_sec) { - if (new_rtc.tv_sec < old_rtc.tv_sec) - pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); + if (new_rtc.tv_sec < old_rtc.tv_sec) { + pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); return 0; } @@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev) sleep_time = timespec_sub(sleep_time, timespec_sub(new_system, old_system)); - timekeeping_inject_sleeptime(&sleep_time); + if (sleep_time.tv_sec >= 0) + timekeeping_inject_sleeptime(&sleep_time); return 0; } diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 8e286259a007..fa4d9f324189 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -319,6 +319,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) } EXPORT_SYMBOL_GPL(rtc_read_alarm); +static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) +{ + int err; + + if (!rtc->ops) + err = -ENODEV; + else if (!rtc->ops->set_alarm) + err = -EINVAL; + else + err = rtc->ops->set_alarm(rtc->dev.parent, alarm); + + return err; +} + static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { struct rtc_time tm; @@ -342,14 +356,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) * over right here, before we set the alarm. */ - if (!rtc->ops) - err = -ENODEV; - else if (!rtc->ops->set_alarm) - err = -EINVAL; - else - err = rtc->ops->set_alarm(rtc->dev.parent, alarm); - - return err; + return ___rtc_set_alarm(rtc, alarm); } int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) @@ -763,6 +770,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) return 0; } +static void rtc_alarm_disable(struct rtc_device *rtc) +{ + struct rtc_wkalrm alarm; + struct rtc_time tm; + + __rtc_read_time(rtc, &tm); + + alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm), + ktime_set(300, 0))); + alarm.enabled = 0; + + ___rtc_set_alarm(rtc, &alarm); +} + /** * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue * @rtc rtc device @@ -784,8 +805,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) struct rtc_wkalrm alarm; int err; next = timerqueue_getnext(&rtc->timerqueue); - if (!next) + if (!next) { + rtc_alarm_disable(rtc); return; + } alarm.time = rtc_ktime_to_tm(next->expires); alarm.enabled = 1; err = __rtc_set_alarm(rtc, &alarm); @@ -847,7 +870,8 @@ again: err = __rtc_set_alarm(rtc, &alarm); if (err == -ETIME) goto again; - } + } else + rtc_alarm_disable(rtc); mutex_unlock(&rtc->ops_lock); } diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 7639ab906f02..5b979d9cc332 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) void __iomem *base = s3c_rtc_base; int year = tm->tm_year - 100; - clk_enable(rtc_clk); pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n", 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); @@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) return -EINVAL; } + clk_enable(rtc_clk); writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC); writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN); writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 75c3f1f8fd43..a84631a7391d 100644 
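Before the s390 cio hunks that follow, a note on the rtc-s3c change above: clk_enable() is moved after the argument check so that the early -EINVAL return cannot leave the clock enable count unbalanced. A minimal sketch of the pattern, with an illustrative validation step:

#include <linux/clk.h>
#include <linux/errno.h>

/* Enable the clock only once the request has been validated, so every
 * clk_enable() is matched by the clk_disable() on the success path.
 */
static int rtc_set_time_sketch(struct clk *clk, int year)
{
	if (year < 0 || year >= 100)
		return -EINVAL;

	clk_enable(clk);
	/* ... program the time registers here ... */
	clk_disable(clk);
	return 0;
}
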
--- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) int chsc_chp_vary(struct chp_id chpid, int on) { struct channel_path *chp = chpid_to_chp(chpid); - struct chp_link link; - memset(&link, 0, sizeof(struct chp_link)); - link.chpid = chpid; /* Wait until previous actions have settled. */ css_wait_for_slow_path(); /* @@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on) /* Try to update the channel path descritor. */ chsc_determine_base_channel_path_desc(chpid, &chp->desc); for_each_subchannel_staged(s390_subchannel_vary_chpid_on, - __s390_vary_chpid_on, &link); + __s390_vary_chpid_on, &chpid); } else for_each_subchannel_staged(s390_subchannel_vary_chpid_off, - NULL, &link); + NULL, &chpid); return 0; } diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 155a82bcb9e5..4a1ff5c2eb88 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -68,8 +68,13 @@ struct schib { __u8 mda[4]; /* model dependent area */ } __attribute__ ((packed,aligned(4))); +/* + * When rescheduled, todo's with higher values will overwrite those + * with lower values. + */ enum sch_todo { SCH_TODO_NOTHING, + SCH_TODO_EVAL, SCH_TODO_UNREG, }; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 92d7324acb1c..21908e67bf67 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch) } EXPORT_SYMBOL_GPL(css_sch_device_unregister); -static void css_sch_todo(struct work_struct *work) -{ - struct subchannel *sch; - enum sch_todo todo; - - sch = container_of(work, struct subchannel, todo_work); - /* Find out todo. */ - spin_lock_irq(sch->lock); - todo = sch->todo; - CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, - sch->schid.sch_no, todo); - sch->todo = SCH_TODO_NOTHING; - spin_unlock_irq(sch->lock); - /* Perform todo. */ - if (todo == SCH_TODO_UNREG) - css_sch_device_unregister(sch); - /* Release workqueue ref. */ - put_device(&sch->dev); -} - -/** - * css_sched_sch_todo - schedule a subchannel operation - * @sch: subchannel - * @todo: todo - * - * Schedule the operation identified by @todo to be performed on the slow path - * workqueue. Do nothing if another operation with higher priority is already - * scheduled. Needs to be called with subchannel lock held. - */ -void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) -{ - CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", - sch->schid.ssid, sch->schid.sch_no, todo); - if (sch->todo >= todo) - return; - /* Get workqueue ref. */ - if (!get_device(&sch->dev)) - return; - sch->todo = todo; - if (!queue_work(cio_work_q, &sch->todo_work)) { - /* Already queued, release workqueue ref. */ - put_device(&sch->dev); - } -} - static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) { int i; @@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow) css_schedule_eval(schid); } +/** + * css_sched_sch_todo - schedule a subchannel operation + * @sch: subchannel + * @todo: todo + * + * Schedule the operation identified by @todo to be performed on the slow path + * workqueue. Do nothing if another operation with higher priority is already + * scheduled. Needs to be called with subchannel lock held. 
+ */ +void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) +{ + CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", + sch->schid.ssid, sch->schid.sch_no, todo); + if (sch->todo >= todo) + return; + /* Get workqueue ref. */ + if (!get_device(&sch->dev)) + return; + sch->todo = todo; + if (!queue_work(cio_work_q, &sch->todo_work)) { + /* Already queued, release workqueue ref. */ + put_device(&sch->dev); + } +} + +static void css_sch_todo(struct work_struct *work) +{ + struct subchannel *sch; + enum sch_todo todo; + int ret; + + sch = container_of(work, struct subchannel, todo_work); + /* Find out todo. */ + spin_lock_irq(sch->lock); + todo = sch->todo; + CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, + sch->schid.sch_no, todo); + sch->todo = SCH_TODO_NOTHING; + spin_unlock_irq(sch->lock); + /* Perform todo. */ + switch (todo) { + case SCH_TODO_NOTHING: + break; + case SCH_TODO_EVAL: + ret = css_evaluate_known_subchannel(sch, 1); + if (ret == -EAGAIN) { + spin_lock_irq(sch->lock); + css_sched_sch_todo(sch, todo); + spin_unlock_irq(sch->lock); + } + break; + case SCH_TODO_UNREG: + css_sch_device_unregister(sch); + break; + } + /* Release workqueue ref. */ + put_device(&sch->dev); +} + static struct idset *slow_subchannel_set; static spinlock_t slow_subchannel_lock; static wait_queue_head_t css_eval_wq; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index d734f4a0ecac..47269858ecb6 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev) */ cdev->private->flags.resuming = 1; cdev->private->path_new_mask = LPM_ANYPATH; - css_schedule_eval(sch->schid); + css_sched_sch_todo(sch, SCH_TODO_EVAL); spin_unlock_irq(sch->lock); - css_complete_work(); + css_wait_for_slow_path(); /* cdev may have been moved to a different subchannel. */ sch = to_subchannel(cdev->dev.parent); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 52c233fa2b12..1b853513c891 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev) cdev->private->pgid_reset_mask = 0; } -void -ccw_device_verify_done(struct ccw_device *cdev, int err) +static void create_fake_irb(struct irb *irb, int type) +{ + memset(irb, 0, sizeof(*irb)); + if (type == FAKE_CMD_IRB) { + struct cmd_scsw *scsw = &irb->scsw.cmd; + scsw->cc = 1; + scsw->fctl = SCSW_FCTL_START_FUNC; + scsw->actl = SCSW_ACTL_START_PEND; + scsw->stctl = SCSW_STCTL_STATUS_PEND; + } else if (type == FAKE_TM_IRB) { + struct tm_scsw *scsw = &irb->scsw.tm; + scsw->x = 1; + scsw->cc = 1; + scsw->fctl = SCSW_FCTL_START_FUNC; + scsw->actl = SCSW_ACTL_START_PEND; + scsw->stctl = SCSW_STCTL_STATUS_PEND; + } +} + +void ccw_device_verify_done(struct ccw_device *cdev, int err) { struct subchannel *sch; @@ -520,12 +538,8 @@ callback: ccw_device_done(cdev, DEV_STATE_ONLINE); /* Deliver fake irb to device driver, if needed. 
*/ if (cdev->private->flags.fake_irb) { - memset(&cdev->private->irb, 0, sizeof(struct irb)); - cdev->private->irb.scsw.cmd.cc = 1; - cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC; - cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND; - cdev->private->irb.scsw.cmd.stctl = - SCSW_STCTL_STATUS_PEND; + create_fake_irb(&cdev->private->irb, + cdev->private->flags.fake_irb); cdev->private->flags.fake_irb = 0; if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index f98698d5735e..ec7fb6d3b479 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, if (cdev->private->state == DEV_STATE_VERIFY) { /* Remember to fake irb when finished. */ if (!cdev->private->flags.fake_irb) { - cdev->private->flags.fake_irb = 1; + cdev->private->flags.fake_irb = FAKE_CMD_IRB; cdev->private->intparm = intparm; return 0; } else @@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, ret = cio_set_options (sch, flags); if (ret) return ret; - /* Adjust requested path mask to excluded varied off paths. */ + /* Adjust requested path mask to exclude unusable paths. */ if (lpm) { - lpm &= sch->opm; + lpm &= sch->lpm; if (lpm == 0) return -EACCES; } @@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, sch = to_subchannel(cdev->dev.parent); if (!sch->schib.pmcw.ena) return -EINVAL; + if (cdev->private->state == DEV_STATE_VERIFY) { + /* Remember to fake irb when finished. */ + if (!cdev->private->flags.fake_irb) { + cdev->private->flags.fake_irb = FAKE_TM_IRB; + cdev->private->intparm = intparm; + return 0; + } else + /* There's already a fake I/O around. */ + return -EBUSY; + } if (cdev->private->state != DEV_STATE_ONLINE) return -EIO; - /* Adjust requested path mask to excluded varied off paths. */ + /* Adjust requested path mask to exclude unusable paths. */ if (lpm) { - lpm &= sch->opm; + lpm &= sch->lpm; if (lpm == 0) return -EACCES; } diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 2ebb492a5c17..76253dfcc1be 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -111,6 +111,9 @@ enum cdev_todo { CDEV_TODO_UNREG_EVAL, }; +#define FAKE_CMD_IRB 1 +#define FAKE_TM_IRB 2 + struct ccw_device_private { struct ccw_device *cdev; struct subchannel *sch; @@ -138,7 +141,7 @@ struct ccw_device_private { unsigned int doverify:1; /* delayed path verification */ unsigned int donotify:1; /* call notify function */ unsigned int recog_done:1; /* dev. recog. 
complete */ - unsigned int fake_irb:1; /* deliver faked irb */ + unsigned int fake_irb:2; /* deliver faked irb */ unsigned int resuming:1; /* recognition while resume */ unsigned int pgroup:1; /* pathgroup is set up */ unsigned int mpath:1; /* multipathing is set up */ diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index ec94f049e995..96bbe9d12a79 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1552,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev) rc = ap_init_queue(ap_dev->qid); if (rc == -ENODEV) ap_dev->unregistered = 1; + else + __ap_schedule_poll_timer(); } static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index a1fd73df5416..8ba4510a9519 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -199,7 +199,7 @@ config SPI_FSL_LIB depends on FSL_SOC config SPI_FSL_SPI - tristate "Freescale SPI controller" + bool "Freescale SPI controller" depends on FSL_SOC select SPI_FSL_LIB help @@ -208,7 +208,7 @@ config SPI_FSL_SPI MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. config SPI_FSL_ESPI - tristate "Freescale eSPI controller" + bool "Freescale eSPI controller" depends on FSL_SOC select SPI_FSL_LIB help diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 024b48aed5ca..acc88b4d2869 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -13,6 +13,7 @@ */ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index e093d3ec41ba..0094c645ff0d 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi) spi_bitbang_cleanup(spi); } -static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) +static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in) { int value; @@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) return value; } -static int __init +static int __devinit spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label, u16 *res_flags) { diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c index 21c70b2b8311..182e9c873822 100644 --- a/drivers/spi/spi-nuc900.c +++ b/drivers/spi/spi-nuc900.c @@ -8,6 +8,7 @@ * */ +#include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/workqueue.h> diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 21d8c1c16cd8..5e78c77d5a08 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev, } insns = - kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL); + kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL); if (!insns) { DPRINTK("kmalloc failed\n"); ret = -ENOMEM; @@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s) return ret; } -static void comedi_unmap(struct vm_area_struct *area) + +static void comedi_vm_open(struct vm_area_struct *area) +{ + struct comedi_async *async; + struct comedi_device *dev; + + async = area->vm_private_data; + dev = async->subdevice->device; + + mutex_lock(&dev->mutex); + async->mmap_count++; + mutex_unlock(&dev->mutex); +} + +static void 
comedi_vm_close(struct vm_area_struct *area) { struct comedi_async *async; struct comedi_device *dev; @@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area) } static struct vm_operations_struct comedi_vm_ops = { - .close = comedi_unmap, + .open = comedi_vm_open, + .close = comedi_vm_close, }; static int comedi_mmap(struct file *file, struct vm_area_struct *vma) { const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; struct comedi_async *async = NULL; unsigned long start = vma->vm_start; unsigned long size; @@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma) int i; int retval; struct comedi_subdevice *s; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + + dev_file_info = comedi_get_device_file_info(minor); + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; mutex_lock(&dev->mutex); if (!dev->attached) { @@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait) { unsigned int mask = 0; const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; struct comedi_subdevice *read_subdev; struct comedi_subdevice *write_subdev; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); + + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; mutex_lock(&dev->mutex); if (!dev->attached) { @@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, int n, m, count = 0, retval = 0; DECLARE_WAITQUEUE(wait, current); const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); + + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; if (!dev->attached) { DPRINTK("no driver configured on comedi%i\n", dev->minor); @@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, retval = -EAGAIN; break; } + schedule(); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } - schedule(); if (!s->busy) break; if (s->busy != file) { @@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, int n, m, count = 0, retval = 0; DECLARE_WAITQUEUE(wait, current); const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); + + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; if (!dev->attached) { DPRINTK("no driver configured on comedi%i\n", dev->minor); @@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, retval 
= -EAGAIN; break; } + schedule(); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } - schedule(); if (!s->busy) { retval = 0; break; @@ -1885,11 +1924,17 @@ ok: static int comedi_close(struct inode *inode, struct file *file) { const unsigned minor = iminor(inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; struct comedi_subdevice *s = NULL; int i; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); + + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; mutex_lock(&dev->mutex); @@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file) static int comedi_fasync(int fd, struct file *file, int on) { const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; return fasync_helper(fd, file, on, &dev->async_queue); } diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c index a8fea9a91733..6144afb8cbaa 100644 --- a/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/drivers/staging/comedi/drivers/usbduxsigma.c @@ -1,4 +1,4 @@ -#define DRIVER_VERSION "v0.5" +#define DRIVER_VERSION "v0.6" #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com" #define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com" /* @@ -25,7 +25,7 @@ Driver: usbduxsigma Description: University of Stirling USB DAQ & INCITE Technology Limited Devices: [ITL] USB-DUX (usbduxsigma.o) Author: Bernd Porr <BerndPorr@f2s.com> -Updated: 21 Jul 2011 +Updated: 8 Nov 2011 Status: testing */ /* @@ -44,6 +44,7 @@ Status: testing * 0.3: proper vendor ID and driver name * 0.4: fixed D/A voltage range * 0.5: various bug fixes, health check at startup + * 0.6: corrected wrong input range */ /* generates loads of debug info */ @@ -175,7 +176,7 @@ Status: testing /* comedi constants */ static const struct comedi_lrange range_usbdux_ai_range = { 1, { BIP_RANGE - (2.65) + (2.65/2.0) } }; diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c index 480b0ed2e4de..115635f95024 100644 --- a/drivers/staging/rts_pstor/rtsx.c +++ b/drivers/staging/rts_pstor/rtsx.c @@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci, th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan"); if (IS_ERR(th)) { printk(KERN_ERR "Unable to start the device-scanning thread\n"); + complete(&dev->scanning_done); quiesce_and_remove_host(dev); err = PTR_ERR(th); goto errout; diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c index 09c44abb89e8..3872b8cccdcf 100644 --- a/drivers/staging/usbip/vhci_rx.c +++ b/drivers/staging/usbip/vhci_rx.c @@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, { struct usbip_device *ud = &vdev->ud; struct urb *urb; + unsigned long flags; spin_lock(&vdev->priv_lock); urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum); @@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, usbip_dbg_vhci_rx("now giveback urb %p\n", 
urb); - spin_lock(&the_controller->lock); + spin_lock_irqsave(&the_controller->lock, flags); usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); - spin_unlock(&the_controller->lock); + spin_unlock_irqrestore(&the_controller->lock, flags); usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); @@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, { struct vhci_unlink *unlink; struct urb *urb; + unsigned long flags; usbip_dump_header(pdu); @@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, urb->status = pdu->u.ret_unlink.status; pr_info("urb->status %d\n", urb->status); - spin_lock(&the_controller->lock); + spin_lock_irqsave(&the_controller->lock, flags); usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); - spin_unlock(&the_controller->lock); + spin_unlock_irqrestore(&the_controller->lock, flags); usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 0fd96c10271d..8599545cdf9e 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -614,13 +614,12 @@ int iscsit_add_reject( hdr = (struct iscsi_reject *) cmd->pdu; hdr->reason = reason; - cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); + cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); if (!cmd->buf_ptr) { pr_err("Unable to allocate memory for cmd->buf_ptr\n"); iscsit_release_cmd(cmd); return -1; } - memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN); spin_lock_bh(&conn->cmd_lock); list_add_tail(&cmd->i_list, &conn->conn_cmd_list); @@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd( hdr = (struct iscsi_reject *) cmd->pdu; hdr->reason = reason; - cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); + cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); if (!cmd->buf_ptr) { pr_err("Unable to allocate memory for cmd->buf_ptr\n"); iscsit_release_cmd(cmd); return -1; } - memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN); if (add_to_conn) { spin_lock_bh(&conn->cmd_lock); @@ -1017,11 +1015,6 @@ done: " non-existent or non-exported iSCSI LUN:" " 0x%016Lx\n", get_unaligned_le64(&hdr->lun)); } - if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 1, buf, cmd); - send_check_condition = 1; goto attach_cmd; } @@ -1044,6 +1037,8 @@ done: */ send_check_condition = 1; } else { + cmd->data_length = cmd->se_cmd.data_length; + if (iscsit_decide_list_to_build(cmd, payload_length) < 0) return iscsit_add_reject_from_cmd( ISCSI_REASON_BOOKMARK_NO_RESOURCES, @@ -1123,7 +1118,7 @@ attach_cmd: * the backend memory allocation. 
*/ ret = transport_generic_new_cmd(&cmd->se_cmd); - if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) { + if (ret < 0) { immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; dump_immediate_data = 1; goto after_immediate_data; @@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) spin_lock_irqsave(&se_cmd->t_state_lock, flags); if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) || - (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED)) + (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)) dump_unsolicited_data = 1; spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); @@ -2513,10 +2508,10 @@ static int iscsit_send_data_in( if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; - hdr->residual_count = cpu_to_be32(cmd->residual_count); + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; - hdr->residual_count = cpu_to_be32(cmd->residual_count); + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); } } hton24(hdr->dlength, datain.length); @@ -3018,10 +3013,10 @@ static int iscsit_send_status( hdr->flags |= ISCSI_FLAG_CMD_FINAL; if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW; - hdr->residual_count = cpu_to_be32(cmd->residual_count); + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; - hdr->residual_count = cpu_to_be32(cmd->residual_count); + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); } hdr->response = cmd->iscsi_response; hdr->cmd_status = cmd->se_cmd.scsi_status; @@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp( hdr = (struct iscsi_tm_rsp *) cmd->pdu; memset(hdr, 0, ISCSI_HDR_LEN); hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; + hdr->flags = ISCSI_FLAG_CMD_FINAL; hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); hdr->itt = cpu_to_be32(cmd->init_task_tag); cmd->stat_sn = conn->stat_sn++; diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index beb39469e7f1..1cd6ce373b83 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -30,9 +30,11 @@ static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) { - int j = DIV_ROUND_UP(len, 2); + int j = DIV_ROUND_UP(len, 2), rc; - hex2bin(dst, src, j); + rc = hex2bin(dst, src, j); + if (rc < 0) + pr_debug("CHAP string contains non hex digit symbols\n"); dst[j] = '\0'; return j; diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 3723d90d5ae5..f1a02dad05a0 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -398,7 +398,6 @@ struct iscsi_cmd { u32 pdu_send_order; /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */ u32 pdu_start; - u32 residual_count; /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */ u32 seq_send_order; /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */ @@ -535,7 +534,6 @@ struct iscsi_conn { atomic_t connection_exit; atomic_t connection_recovery; atomic_t connection_reinstatement; - atomic_t connection_wait; atomic_t connection_wait_rcfr; atomic_t sleep_on_conn_wait_comp; atomic_t transport_failed; @@ -643,7 +641,6 @@ struct iscsi_session { atomic_t session_reinstatement; 
atomic_t session_stop_active; atomic_t sleep_on_sess_wait_comp; - atomic_t transport_wait_cmds; /* connection list */ struct list_head sess_conn_list; struct list_head cr_active_list; diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index c4c68da3e500..101b1beb3bca 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well. */ if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) { - if (se_cmd->se_cmd_flags & - SCF_SCSI_RESERVATION_CONFLICT) { + if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) { cmd->i_state = ISTATE_SEND_STATUS; spin_unlock_bh(&cmd->istate_lock); iscsit_add_cmd_to_response_queue(cmd, cmd->conn, diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index daad362a93ce..d734bdec24f9 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1( iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); pr_err("Could not allocate memory for session\n"); - return -1; + return -ENOMEM; } iscsi_login_set_conn_values(sess, conn, pdu->cid); @@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1( pr_err("idr_pre_get() for sess_idr failed\n"); iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); - return -1; + kfree(sess); + return -ENOMEM; } spin_lock(&sess_idr_lock); idr_get_new(&sess_idr, NULL, &sess->session_index); @@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1( ISCSI_LOGIN_STATUS_NO_RESOURCES); pr_err("Unable to allocate memory for" " struct iscsi_sess_ops.\n"); - return -1; + kfree(sess); + return -ENOMEM; } sess->se_sess = transport_init_session(); - if (!sess->se_sess) { + if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); - return -1; + kfree(sess); + return -ENOMEM; } return 0; diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 426cd4bf6a9a..98936cb7c294 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation( return NULL; } - login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); + login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL); if (!login->req) { pr_err("Unable to allocate memory for Login Request.\n"); iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); goto out; } - memcpy(login->req, login_pdu, ISCSI_HDR_LEN); login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); if (!login->req_buf) { diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 3df1c9b8ae6b..81d5832fbbd5 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd( scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr, &tl_cmd->tl_sense_buf[0]); - /* - * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi - */ if (scsi_bidi_cmnd(sc)) - se_cmd->t_tasks_bidi = 1; + se_cmd->se_cmd_flags |= SCF_BIDI; + /* * Locate the struct se_lun pointer and attach it to struct se_cmd */ @@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) * 
Allocate the necessary tasks to complete the received CDB+data */ ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); - if (ret == -ENOMEM) { - /* Out of Resources */ - return PYX_TRANSPORT_LU_COMM_FAILURE; - } else if (ret == -EINVAL) { - /* - * Handle case for SAM_STAT_RESERVATION_CONFLICT - */ - if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) - return PYX_TRANSPORT_RESERVATION_CONFLICT; - /* - * Otherwise, return SAM_STAT_CHECK_CONDITION and return - * sense data. - */ - return PYX_TRANSPORT_USE_SENSE_REASON; - } - + if (ret != 0) + return ret; /* * For BIDI commands, pass in the extra READ buffer * to transport_generic_map_mem_to_cmd() below.. */ - if (se_cmd->t_tasks_bidi) { + if (se_cmd->se_cmd_flags & SCF_BIDI) { struct scsi_data_buffer *sdb = scsi_in(sc); sgl_bidi = sdb->table.sgl; @@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) } /* Tell the core about our preallocated memory */ - ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), + return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); - if (ret < 0) - return PYX_TRANSPORT_LU_COMM_FAILURE; - - return 0; } /* @@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba( { struct tcm_loop_hba *tl_hba = container_of(wwn, struct tcm_loop_hba, tl_hba_wwn); - int host_no = tl_hba->sh->host_no; + + pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target" + " SAS Address: %s at Linux/SCSI Host ID: %d\n", + tl_hba->tl_wwn_address, tl_hba->sh->host_no); /* * Call device_unregister() on the original tl_hba->dev. * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will * release *tl_hba; */ device_unregister(&tl_hba->dev); - - pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target" - " SAS Address: %s at Linux/SCSI Host ID: %d\n", - config_item_name(&wwn->wwn_group.cg_item), host_no); } /* Start items for tcm_loop_cit */ diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 88f2ad43ec8b..1dcbef499d6a 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task) int alua_access_state, primary = 0, rc; u16 tg_pt_id, rtpi; - if (!l_port) - return PYX_TRANSPORT_LU_COMM_FAILURE; - + if (!l_port) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; + } buf = transport_kmap_first_data_page(cmd); /* @@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; if (!l_tg_pt_gp_mem) { pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); - rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + rc = -EINVAL; goto out; } spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) if (!l_tg_pt_gp) { spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); - rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + rc = -EINVAL; goto out; } rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); @@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) if (!rc) { pr_debug("Unable to process SET_TARGET_PORT_GROUPS" " while TPGS_EXPLICT_ALUA is disabled\n"); - rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + rc = -EINVAL; 
goto out; } @@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) * REQUEST, and the additional sense code set to INVALID * FIELD IN PARAMETER LIST. */ - rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + rc = -EINVAL; goto out; } rc = -1; @@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) * throw an exception with ASCQ: INVALID_PARAMETER_LIST */ if (rc != 0) { - rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + rc = -EINVAL; goto out; } } else { @@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) * INVALID_PARAMETER_LIST */ if (rc != 0) { - rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + rc = -EINVAL; goto out; } } @@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) * struct t10_alua_lu_gp. */ spin_lock(&lu_gps_lock); - atomic_set(&lu_gp->lu_gp_shutdown, 1); list_del(&lu_gp->lu_gp_node); alua_lu_gps_count--; spin_unlock(&lu_gps_lock); @@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( tg_pt_gp_mem->tg_pt = port; port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; - atomic_set(&port->sep_tg_pt_gp_active, 1); return tg_pt_gp_mem; } diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 683ba02b8247..831468b3163d 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) if (cmd->data_length < 60) return 0; - buf[2] = 0x3c; + buf[3] = 0x3c; /* Set HEADSUP, ORDSUP, SIMPSUP */ buf[5] = 0x07; @@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task) if (cmd->data_length < 4) { pr_err("SCSI Inquiry payload length: %u" " too small for EVPD=1\n", cmd->data_length); + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; return -EINVAL; } @@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task) } pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; ret = -EINVAL; out_unmap: @@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task) default: pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", cdb[2] & 0x3f, cdb[3]); - return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; + cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; + return -EINVAL; } offset += length; @@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task) if (cdb[1] & 0x01) { pr_err("REQUEST_SENSE description emulation not" " supported\n"); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -ENOSYS; } buf = transport_kmap_first_data_page(cmd); @@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task) if (!dev->transport->do_discard) { pr_err("UNMAP emulation not supported for: %s\n", dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -ENOSYS; } /* First UNMAP block descriptor starts at 8 byte offset */ @@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task) if (!dev->transport->do_discard) { pr_err("WRITE_SAME emulation not supported" " for: %s\n", dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -ENOSYS; } if (cmd->t_task_cdb[0] == WRITE_SAME) @@ -1193,11 +1199,13 @@ int 
target_emulate_write_same(struct se_task *task) int target_emulate_synchronize_cache(struct se_task *task) { struct se_device *dev = task->task_se_cmd->se_dev; + struct se_cmd *cmd = task->task_se_cmd; if (!dev->transport->do_sync_cache) { pr_err("SYNCHRONIZE_CACHE emulation not supported" " for: %s\n", dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -ENOSYS; } dev->transport->do_sync_cache(task); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index e0c1e8a8dd4e..93d4f6a1b798 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup; static struct config_group alua_group; static struct config_group alua_lu_gps_group; -static DEFINE_SPINLOCK(se_device_lock); -static LIST_HEAD(se_dev_list); - static inline struct se_hba * item_to_hba(struct config_item *item) { @@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev( " struct se_subsystem_dev\n"); goto unlock; } - INIT_LIST_HEAD(&se_dev->se_dev_node); INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); @@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev( " from allocate_virtdevice()\n"); goto out; } - spin_lock(&se_device_lock); - list_add_tail(&se_dev->se_dev_node, &se_dev_list); - spin_unlock(&se_device_lock); config_group_init_type_name(&se_dev->se_dev_group, name, &target_core_dev_cit); @@ -2874,10 +2867,6 @@ static void target_core_drop_subdev( mutex_lock(&hba->hba_access_mutex); t = hba->transport; - spin_lock(&se_device_lock); - list_del(&se_dev->se_dev_node); - spin_unlock(&se_device_lock); - dev_stat_grp = &se_dev->dev_stat_grps.stat_group; for (i = 0; dev_stat_grp->default_groups[i]; i++) { df_item = &dev_stat_grp->default_groups[i]->cg_item; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index ba5edec2c5f8..9b8639425472 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); @@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->orig_fe_lun = 0; - se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } /* @@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = se_cmd->se_dev; } spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); @@ -708,7 +705,7 @@ done: se_task->task_scsi_status = GOOD; transport_complete_task(se_task, 1); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } /* se_release_device_for_hba(): @@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag) return -EINVAL; } - pr_err("dpo_emulated not supported\n"); - return -EINVAL; + if (flag) { + 
pr_err("dpo_emulated not supported\n"); + return -EINVAL; + } + + return 0; } int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) @@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) return -EINVAL; } - if (dev->transport->fua_write_emulated == 0) { + if (flag && dev->transport->fua_write_emulated == 0) { pr_err("fua_write_emulated not supported\n"); return -EINVAL; } @@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) return -EINVAL; } - pr_err("ua read emulated not supported\n"); - return -EINVAL; + if (flag) { + pr_err("ua read emulated not supported\n"); + return -EINVAL; + } + + return 0; } int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) @@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->write_cache_emulated == 0) { + if (flag && dev->transport->write_cache_emulated == 0) { pr_err("write_cache_emulated not supported\n"); return -EINVAL; } @@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). */ - if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { + if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { pr_err("Generic Block Discard not supported\n"); return -ENOSYS; } @@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). */ - if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { + if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { pr_err("Generic Block Discard not supported\n"); return -ENOSYS; } @@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void) ret = -ENOMEM; goto out; } - INIT_LIST_HEAD(&se_dev->se_dev_node); INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 67cd6fe05bfa..b4864fba4ef0 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task) return -ENOMEM; } - for (i = 0; i < task->task_sg_nents; i++) { - iov[i].iov_len = sg[i].length; - iov[i].iov_base = sg_virt(&sg[i]); + for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + iov[i].iov_len = sg->length; + iov[i].iov_base = sg_virt(sg); } old_fs = get_fs(); @@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task) return -ENOMEM; } - for (i = 0; i < task->task_sg_nents; i++) { - iov[i].iov_len = sg[i].length; - iov[i].iov_base = sg_virt(&sg[i]); + for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + iov[i].iov_len = sg->length; + iov[i].iov_base = sg_virt(sg); } old_fs = get_fs(); @@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task) if (ret > 0 && dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - cmd->t_tasks_fua) { + (cmd->se_cmd_flags & SCF_FUA)) { /* * We might need to be a bit smarter here * and return some sense data to let the initiator @@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task) } - if (ret < 0) + if (ret < 0) { + cmd->scsi_sense_reason = 
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return ret; + } if (ret) { task->task_scsi_status = GOOD; transport_complete_task(task, 1); } - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } /* fd_free_task(): (Part of se_subsystem_api_t template) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7698efe29262..4aa992204438 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task) */ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - task->task_se_cmd->t_tasks_fua)) + (cmd->se_cmd_flags & SCF_FUA))) rw = WRITE_FUA; else rw = WRITE; @@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task) else { pr_err("Unsupported SCSI -> BLOCK LBA conversion:" " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOSYS; } bio = iblock_get_bio(task, block_lba, sg_num); - if (!bio) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + if (!bio) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; + } bio_list_init(&list); bio_list_add(&list, bio); @@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task) submit_bio(rw, bio); blk_finish_plug(&plug); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; fail: while ((bio = bio_list_pop(&list))) bio_put(bio); - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; } static u32 iblock_get_device_rev(struct se_device *dev) diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 5a4ebfc3a54f..95dee7074aeb 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret) pr_err("Received legacy SPC-2 RESERVE/RELEASE" " while active SPC-3 registrations exist," " returning RESERVATION_CONFLICT\n"); - *ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; return true; } @@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task) (cmd->t_task_cdb[1] & 0x02)) { pr_err("LongIO and Obselete Bits set, returning" " ILLEGAL_REQUEST\n"); - ret = PYX_TRANSPORT_ILLEGAL_REQUEST; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + ret = -EINVAL; goto out; } /* @@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task) " from %s \n", cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); - ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + ret = -EINVAL; goto out_unlock; } @@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port( tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); if (!tidh_new) { pr_err("Unable to allocate tidh_new\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } INIT_LIST_HEAD(&tidh_new->dest_list); tidh_new->dest_tpg = tpg; @@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port( sa_res_key, all_tg_pt, aptpl); if (!local_pr_reg) { kfree(tidh_new); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; } 
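The target-core hunks in this area replace the PYX_TRANSPORT_* return codes with a convention of recording a TCM_* sense reason in the command and returning a plain negative errno. A minimal sketch of that convention, using only the field and constant names visible in the hunks above (the function name is illustrative):

#include <linux/errno.h>
#include <target/target_core_base.h>

/* Record why the command failed and hand the caller an errno; the
 * stored sense reason is what is eventually reported to the initiator.
 */
static int fail_with_sense(struct se_cmd *cmd)
{
	cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
	return -EINVAL;
}
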
tidh_new->dest_pr_reg = local_pr_reg; /* @@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port( pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" " does not equal CDB data_length: %u\n", tpdl, cmd->data_length); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } /* @@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port( " for tmp_tpg\n"); atomic_dec(&tmp_tpg->tpg_pr_ref_count); smp_mb__after_atomic_dec(); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } /* @@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port( atomic_dec(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_dec(); core_scsi3_tpg_undepend_item(tmp_tpg); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } @@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port( if (!dest_tpg) { pr_err("SPC-3 PR SPEC_I_PT: Unable to locate" " dest_tpg\n"); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } #if 0 @@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port( " %u for Transport ID: %s\n", tid_len, ptr); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } /* @@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port( core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } @@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port( smp_mb__after_atomic_dec(); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } #if 0 @@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port( core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -ENOMEM; goto out; } INIT_LIST_HEAD(&tidh_new->dest_list); @@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port( core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); kfree(tidh_new); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } tidh_new->dest_pr_reg = dest_pr_reg; @@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register( if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } se_tpg = se_sess->se_tpg; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; @@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register( if (res_key) { pr_warn("SPC-3 PR: Reservation Key non-zero" " for SA REGISTER, returning CONFLICT\n"); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * Do 
nothing but return GOOD status. */ if (!sa_res_key) - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; if (!spec_i_pt) { /* @@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register( if (ret != 0) { pr_err("Unable to allocate" " struct t10_pr_registration\n"); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } } else { /* @@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register( " 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } } if (spec_i_pt) { pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" " set while sa_res_key=0\n"); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } /* * An existing ALL_TG_PT=1 registration being released @@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register( " registration exists, but ALL_TG_PT=1 bit not" " present in received PROUT\n"); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } /* * Allocate APTPL metadata buffer used for UNREGISTER ops @@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register( pr_err("Unable to allocate" " pr_aptpl_buf\n"); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } } /* @@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register( if (pr_holder < 0) { kfree(pr_aptpl_buf); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } spin_lock(&pr_tmpl->registration_lock); @@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve( if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } se_tpg = se_sess->se_tpg; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; @@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve( if (!pr_reg) { pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for RESERVE\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * From spc4r17 Section 5.7.9: Reserving: @@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve( " does not match existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * From spc4r17 Section 5.7.9: Reserving: @@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve( if (scope != PR_SCOPE_LU_SCOPE) { pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } /* * See if we have an existing PR reservation holder pointer at @@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve( spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * From spc4r17 Section 5.7.9: 
Reserving: @@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve( spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * From spc4r17 Section 5.7.9: Reserving: @@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve( */ spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } /* * Otherwise, our *pr_reg becomes the PR reservation holder for said @@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve( default: pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" " 0x%02x\n", type); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } return ret; @@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release( if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * Locate the existing *pr_reg via struct se_node_acl pointers @@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release( if (!pr_reg) { pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for RELEASE\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * From spc4r17 Section 5.7.11.2 Releasing: @@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release( */ spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) @@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release( */ spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } /* * From spc4r17 Section 5.7.11.2 Releasing: @@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release( " 0x%016Lx\n", res_key, pr_reg->pr_res_key); spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * From spc4r17 Section 5.7.11.2 Releasing and above: @@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release( spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * In response to a persistent reservation release request from the @@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear( if (!pr_reg_n) { pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for CLEAR\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * From spc4r17 section 5.7.11.6, Clearing: @@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear( " existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * a) Release the persistent reservation, if any; @@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt( int all_reg = 0, calling_it_nexus = 0, released_regs = 0; int 
prh_type = 0, prh_scope = 0, ret; - if (!se_sess) - return PYX_TRANSPORT_LU_COMM_FAILURE; + if (!se_sess) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; + } se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, @@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt( pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for PREEMPT%s\n", (abort) ? "_AND_ABORT" : ""); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } if (pr_reg_n->pr_res_key != res_key) { core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } if (scope != PR_SCOPE_LU_SCOPE) { pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } INIT_LIST_HEAD(&preempt_and_abort_list); @@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt( if (!all_reg && !sa_res_key) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } /* * From spc4r17, section 5.7.11.4.4 Removing Registrations: @@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt( if (!released_regs) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * For an existing all registrants type reservation @@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt( default: pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" " Type: 0x%02x\n", (abort) ? 
"_AND_ABORT" : "", type); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } return ret; @@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move( if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } memset(dest_iport, 0, 64); memset(i_buf, 0, PR_REG_ISID_ID_LEN); @@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move( if (!pr_reg) { pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" " *pr_reg for REGISTER_AND_MOVE\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * The provided reservation key much match the existing reservation key @@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move( " res_key: 0x%016Lx does not match existing SA REGISTER" " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * The service active reservation key needs to be non zero @@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move( pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" " sa_res_key\n"); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } /* @@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move( " does not equal CDB data_length: %u\n", tid_len, cmd->data_length); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } spin_lock(&dev->se_port_lock); @@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move( atomic_dec(&dest_se_tpg->tpg_pr_ref_count); smp_mb__after_atomic_dec(); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } spin_lock(&dev->se_port_lock); @@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move( " fabric ops from Relative Target Port Identifier:" " %hu\n", rtpi); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move( " from fabric: %s\n", proto_ident, dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), dest_tf_ops->get_fabric_name()); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" " containg a valid tpg_parse_pr_out_transport_id" " function pointer\n"); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, @@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move( if (!initiator_str) { pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" " initiator_str from Transport ID\n"); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + 
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } @@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move( pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" " matches: %s on received I_T Nexus\n", initiator_str, pr_reg_nacl->initiatorname); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { @@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move( " matches: %s %s on received I_T Nexus\n", initiator_str, iport_ptr, pr_reg_nacl->initiatorname, pr_reg->pr_reg_isid); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } after_iport_check: @@ -3517,7 +3576,8 @@ after_iport_check: pr_err("Unable to locate %s dest_node_acl for" " TransportID%s\n", dest_tf_ops->get_fabric_name(), initiator_str); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } ret = core_scsi3_nodeacl_depend_item(dest_node_acl); @@ -3527,7 +3587,8 @@ after_iport_check: atomic_dec(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_dec(); dest_node_acl = NULL; - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } #if 0 @@ -3543,7 +3604,8 @@ after_iport_check: if (!dest_se_deve) { pr_err("Unable to locate %s dest_se_deve from RTPI:" " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } @@ -3553,7 +3615,8 @@ after_iport_check: atomic_dec(&dest_se_deve->pr_ref_count); smp_mb__after_atomic_dec(); dest_se_deve = NULL; - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } #if 0 @@ -3572,7 +3635,8 @@ after_iport_check: pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" " currently held\n"); spin_unlock(&dev->dev_reservation_lock); - ret = PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + ret = -EINVAL; goto out; } /* @@ -3585,7 +3649,8 @@ after_iport_check: pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" " Nexus is not reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); - ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + ret = -EINVAL; goto out; } /* @@ -3603,7 +3668,8 @@ after_iport_check: " reservation for type: %s\n", core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); spin_unlock(&dev->dev_reservation_lock); - ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + ret = -EINVAL; goto out; } pr_res_nacl = pr_res_holder->pr_reg_nacl; @@ -3640,7 +3706,8 @@ after_iport_check: sa_res_key, 0, aptpl, 2, 1); if (ret != 0) { spin_unlock(&dev->dev_reservation_lock); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, @@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) pr_err("Received PERSISTENT_RESERVE CDB while legacy" " SPC-2 reservation is held, returning" " RESERVATION_CONFLICT\n"); - ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + ret = EINVAL; goto out; } @@ -3779,13 
+3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task) * FIXME: A NULL struct se_session pointer means an this is not coming from * a $FABRIC_MOD's nexus, but from internal passthrough ops. */ - if (!cmd->se_sess) - return PYX_TRANSPORT_LU_COMM_FAILURE; + if (!cmd->se_sess) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; + } if (cmd->data_length < 24) { pr_warn("SPC-PR: Received PR OUT parameter list" " length too small: %u\n", cmd->data_length); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } /* @@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) * SPEC_I_PT=1 is only valid for Service action: REGISTER */ if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) { - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } @@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) (cmd->data_length != 24)) { pr_warn("SPC-PR: Received PR OUT illegal parameter" " list length: %u\n", cmd->data_length); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } /* @@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) default: pr_err("Unknown PERSISTENT_RESERVE_OUT service" " action: 0x%02x\n", cdb[1] & 0x1f); - ret = PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + ret = -EINVAL; break; } @@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) if (cmd->data_length < 8) { pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" " too small\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) if (cmd->data_length < 8) { pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" " too small\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) if (cmd->data_length < 6) { pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" " %u too small\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) if (cmd->data_length < 8) { pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" " too small\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task) pr_err("Received PERSISTENT_RESERVE CDB while legacy" " SPC-2 reservation is held, returning" " RESERVATION_CONFLICT\n"); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } switch (cmd->t_task_cdb[1] & 0x1f) { @@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task) default: pr_err("Unknown PERSISTENT_RESERVE_IN service" " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); - ret = 
PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + ret = -EINVAL; break; } diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index ed32e1efe429..8b15e56b0384 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num) static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, struct bio **hbio) { + struct se_cmd *cmd = task->task_se_cmd; struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; u32 task_sg_num = task->task_sg_nents; struct bio *bio = NULL, *tbio = NULL; @@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, u32 data_len = task->task_size, i, len, bytes, off; int nr_pages = (task->task_size + task_sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; - int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + int nr_vecs = 0, rc; int rw = (task->task_data_direction == DMA_TO_DEVICE); *hbio = NULL; @@ -1058,11 +1059,13 @@ fail: bio->bi_next = NULL; bio_endio(bio, 0); /* XXX: should be error */ } - return ret; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; } static int pscsi_do_task(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; struct pscsi_plugin_task *pt = PSCSI_TASK(task); struct request *req; @@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task) if (!req || IS_ERR(req)) { pr_err("PSCSI: blk_get_request() failed: %ld\n", req ? IS_ERR(req) : -ENOMEM); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENODEV; } } else { BUG_ON(!task->task_size); @@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task) * Setup the main struct request for the task->task_sg[] payload */ ret = pscsi_map_sg(task, task->task_sg, &hbio); - if (ret < 0) - return PYX_TRANSPORT_LU_COMM_FAILURE; + if (ret < 0) { + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return ret; + } req = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL); @@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task) (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), pscsi_req_done); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; fail: while (hbio) { @@ -1124,7 +1132,8 @@ fail: bio->bi_next = NULL; bio_endio(bio, 0); /* XXX: should be error */ } - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; } /* pscsi_get_sense_buffer(): @@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status( " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], pt->pscsi_result); task->task_scsi_status = SAM_STAT_CHECK_CONDITION; - task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - task->task_se_cmd->transport_error_status = - PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + task->task_se_cmd->scsi_sense_reason = + TCM_UNSUPPORTED_SCSI_OPCODE; transport_complete_task(task, 0); break; } diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 5158d3846f19..02e51faa2f4e 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return NULL; } -/* rd_MEMCPY_read(): - * - * - */ -static int rd_MEMCPY_read(struct rd_request 
*req) +static int rd_MEMCPY(struct rd_request *req, u32 read_rd) { struct se_task *task = &req->rd_task; struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; struct rd_dev_sg_table *table; - struct scatterlist *sg_d, *sg_s; - void *dst, *src; - u32 i = 0, j = 0, dst_offset = 0, src_offset = 0; - u32 length, page_end = 0, table_sg_end; + struct scatterlist *rd_sg; + struct sg_mapping_iter m; u32 rd_offset = req->rd_offset; + u32 src_len; table = rd_get_sg_table(dev, req->rd_page); if (!table) return -EINVAL; - table_sg_end = (table->page_end_offset - req->rd_page); - sg_d = task->task_sg; - sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; + rd_sg = &table->sg_table[req->rd_page - table->page_start_offset]; - pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" - " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, - req->rd_page, req->rd_offset); - - src_offset = rd_offset; + pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", + dev->rd_dev_id, read_rd ? "Read" : "Write", + task->task_lba, req->rd_size, req->rd_page, + rd_offset); + src_len = PAGE_SIZE - rd_offset; + sg_miter_start(&m, task->task_sg, task->task_sg_nents, + read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG); while (req->rd_size) { - if ((sg_d[i].length - dst_offset) < - (sg_s[j].length - src_offset)) { - length = (sg_d[i].length - dst_offset); - - pr_debug("Step 1 - sg_d[%d]: %p length: %d" - " offset: %u sg_s[%d].length: %u\n", i, - &sg_d[i], sg_d[i].length, sg_d[i].offset, j, - sg_s[j].length); - pr_debug("Step 1 - length: %u dst_offset: %u" - " src_offset: %u\n", length, dst_offset, - src_offset); - - if (length > req->rd_size) - length = req->rd_size; - - dst = sg_virt(&sg_d[i++]) + dst_offset; - BUG_ON(!dst); - - src = sg_virt(&sg_s[j]) + src_offset; - BUG_ON(!src); - - dst_offset = 0; - src_offset = length; - page_end = 0; - } else { - length = (sg_s[j].length - src_offset); - - pr_debug("Step 2 - sg_d[%d]: %p length: %d" - " offset: %u sg_s[%d].length: %u\n", i, - &sg_d[i], sg_d[i].length, sg_d[i].offset, - j, sg_s[j].length); - pr_debug("Step 2 - length: %u dst_offset: %u" - " src_offset: %u\n", length, dst_offset, - src_offset); - - if (length > req->rd_size) - length = req->rd_size; - - dst = sg_virt(&sg_d[i]) + dst_offset; - BUG_ON(!dst); - - if (sg_d[i].length == length) { - i++; - dst_offset = 0; - } else - dst_offset = length; - - src = sg_virt(&sg_s[j++]) + src_offset; - BUG_ON(!src); - - src_offset = 0; - page_end = 1; - } + u32 len; + void *rd_addr; - memcpy(dst, src, length); + sg_miter_next(&m); + len = min((u32)m.length, src_len); + m.consumed = len; - pr_debug("page: %u, remaining size: %u, length: %u," - " i: %u, j: %u\n", req->rd_page, - (req->rd_size - length), length, i, j); + rd_addr = sg_virt(rd_sg) + rd_offset; - req->rd_size -= length; - if (!req->rd_size) - return 0; + if (read_rd) + memcpy(m.addr, rd_addr, len); + else + memcpy(rd_addr, m.addr, len); - if (!page_end) + req->rd_size -= len; + if (!req->rd_size) continue; - if (++req->rd_page <= table->page_end_offset) { - pr_debug("page: %u in same page table\n", - req->rd_page); + src_len -= len; + if (src_len) { + rd_offset += len; continue; } - pr_debug("getting new page table for page: %u\n", - req->rd_page); - - table = rd_get_sg_table(dev, req->rd_page); - if (!table) - return -EINVAL; - - sg_s = &table->sg_table[j = 0]; - } - - return 0; -} - -/* rd_MEMCPY_write(): - * - * - */ -static int rd_MEMCPY_write(struct rd_request *req) -{ - struct se_task *task = &req->rd_task; - struct rd_dev *dev 
= req->rd_task.task_se_cmd->se_dev->dev_ptr; - struct rd_dev_sg_table *table; - struct scatterlist *sg_d, *sg_s; - void *dst, *src; - u32 i = 0, j = 0, dst_offset = 0, src_offset = 0; - u32 length, page_end = 0, table_sg_end; - u32 rd_offset = req->rd_offset; - - table = rd_get_sg_table(dev, req->rd_page); - if (!table) - return -EINVAL; - - table_sg_end = (table->page_end_offset - req->rd_page); - sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; - sg_s = task->task_sg; - - pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u," - " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, - req->rd_page, req->rd_offset); - - dst_offset = rd_offset; - - while (req->rd_size) { - if ((sg_s[i].length - src_offset) < - (sg_d[j].length - dst_offset)) { - length = (sg_s[i].length - src_offset); - - pr_debug("Step 1 - sg_s[%d]: %p length: %d" - " offset: %d sg_d[%d].length: %u\n", i, - &sg_s[i], sg_s[i].length, sg_s[i].offset, - j, sg_d[j].length); - pr_debug("Step 1 - length: %u src_offset: %u" - " dst_offset: %u\n", length, src_offset, - dst_offset); - - if (length > req->rd_size) - length = req->rd_size; - - src = sg_virt(&sg_s[i++]) + src_offset; - BUG_ON(!src); - - dst = sg_virt(&sg_d[j]) + dst_offset; - BUG_ON(!dst); - - src_offset = 0; - dst_offset = length; - page_end = 0; - } else { - length = (sg_d[j].length - dst_offset); - - pr_debug("Step 2 - sg_s[%d]: %p length: %d" - " offset: %d sg_d[%d].length: %u\n", i, - &sg_s[i], sg_s[i].length, sg_s[i].offset, - j, sg_d[j].length); - pr_debug("Step 2 - length: %u src_offset: %u" - " dst_offset: %u\n", length, src_offset, - dst_offset); - - if (length > req->rd_size) - length = req->rd_size; - - src = sg_virt(&sg_s[i]) + src_offset; - BUG_ON(!src); - - if (sg_s[i].length == length) { - i++; - src_offset = 0; - } else - src_offset = length; - - dst = sg_virt(&sg_d[j++]) + dst_offset; - BUG_ON(!dst); - - dst_offset = 0; - page_end = 1; - } - - memcpy(dst, src, length); - - pr_debug("page: %u, remaining size: %u, length: %u," - " i: %u, j: %u\n", req->rd_page, - (req->rd_size - length), length, i, j); - - req->rd_size -= length; - if (!req->rd_size) - return 0; - - if (!page_end) - continue; - - if (++req->rd_page <= table->page_end_offset) { - pr_debug("page: %u in same page table\n", - req->rd_page); + /* rd page completed, next one please */ + req->rd_page++; + rd_offset = 0; + src_len = PAGE_SIZE; + if (req->rd_page <= table->page_end_offset) { + rd_sg++; continue; } - pr_debug("getting new page table for page: %u\n", - req->rd_page); - table = rd_get_sg_table(dev, req->rd_page); - if (!table) + if (!table) { + sg_miter_stop(&m); return -EINVAL; + } - sg_d = &table->sg_table[j = 0]; + /* since we increment, the first sg entry is correct */ + rd_sg = table->sg_table; } - + sg_miter_stop(&m); return 0; } @@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task) { struct se_device *dev = task->task_se_cmd->se_dev; struct rd_request *req = RD_REQ(task); - unsigned long long lba; + u64 tmp; int ret; - req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE; - lba = task->task_lba; - req->rd_offset = (do_div(lba, - (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) * - dev->se_sub_dev->se_dev_attrib.block_size; + tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; + req->rd_offset = do_div(tmp, PAGE_SIZE); + req->rd_page = tmp; req->rd_size = task->task_size; - if (task->task_data_direction == DMA_FROM_DEVICE) - ret = rd_MEMCPY_read(req); - else - ret = 
rd_MEMCPY_write(req); - + ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE); if (ret != 0) return ret; task->task_scsi_status = GOOD; transport_complete_task(task, 1); - - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } /* rd_free_task(): (Part of se_subsystem_api_t template) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 217e29df6297..684522805a1f 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list( " %d t_fe_count: %d\n", (preempt_and_abort_list) ? "Preempt" : "", cmd, cmd->t_state, atomic_read(&cmd->t_fe_count)); - /* - * Signal that the command has failed via cmd->se_cmd_flags, - */ - transport_new_cmd_failure(cmd); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, atomic_read(&cmd->t_fe_count)); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 3400ae6e93f8..0257658e2e3e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -61,7 +61,6 @@ static int sub_api_initialized; static struct workqueue_struct *target_completion_wq; -static struct kmem_cache *se_cmd_cache; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_tmr_req_cache; struct kmem_cache *se_ua_cache; @@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); -static void transport_generic_request_failure(struct se_cmd *, int, int); +static void transport_generic_request_failure(struct se_cmd *); static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) { - se_cmd_cache = kmem_cache_create("se_cmd_cache", - sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); - if (!se_cmd_cache) { - pr_err("kmem_cache_create for struct se_cmd failed\n"); - goto out; - } se_tmr_req_cache = kmem_cache_create("se_tmr_cache", sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 0, NULL); if (!se_tmr_req_cache) { pr_err("kmem_cache_create() for struct se_tmr_req" " failed\n"); - goto out_free_cmd_cache; + goto out; } se_sess_cache = kmem_cache_create("se_sess_cache", sizeof(struct se_session), __alignof__(struct se_session), @@ -182,8 +175,6 @@ out_free_sess_cache: kmem_cache_destroy(se_sess_cache); out_free_tmr_req_cache: kmem_cache_destroy(se_tmr_req_cache); -out_free_cmd_cache: - kmem_cache_destroy(se_cmd_cache); out: return -ENOMEM; } @@ -191,7 +182,6 @@ out: void release_se_kmem_caches(void) { destroy_workqueue(target_completion_wq); - kmem_cache_destroy(se_cmd_cache); kmem_cache_destroy(se_tmr_req_cache); kmem_cache_destroy(se_sess_cache); kmem_cache_destroy(se_ua_cache); @@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) task->task_scsi_status = GOOD; } else { task->task_scsi_status = SAM_STAT_CHECK_CONDITION; - task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; - task->task_se_cmd->transport_error_status = - PYX_TRANSPORT_ILLEGAL_REQUEST; + task->task_se_cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } transport_complete_task(task, good); @@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); - transport_generic_request_failure(cmd, 1, 1); + 
transport_generic_request_failure(cmd); } /* transport_complete_task(): @@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success) if (cmd->t_tasks_failed) { if (!task->task_error_status) { task->task_error_status = - PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - cmd->transport_error_status = - PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } + INIT_WORK(&cmd->work, target_complete_failure_work); } else { atomic_set(&cmd->t_transport_complete, 1); @@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba( dev->se_hba = hba; dev->se_sub_dev = se_dev; dev->transport = transport; - atomic_set(&dev->active_cmds, 0); INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); INIT_LIST_HEAD(&dev->dev_tmr_list); INIT_LIST_HEAD(&dev->execute_task_list); INIT_LIST_HEAD(&dev->delayed_cmd_list); - INIT_LIST_HEAD(&dev->ordered_cmd_list); INIT_LIST_HEAD(&dev->state_task_list); INIT_LIST_HEAD(&dev->qf_cmd_list); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); - spin_lock_init(&dev->ordered_cmd_lock); - spin_lock_init(&dev->state_task_lock); - spin_lock_init(&dev->dev_alua_lock); spin_lock_init(&dev->dev_reservation_lock); spin_lock_init(&dev->dev_status_lock); - spin_lock_init(&dev->dev_status_thr_lock); spin_lock_init(&dev->se_port_lock); spin_lock_init(&dev->se_tmr_lock); spin_lock_init(&dev->qf_cmd_lock); @@ -1507,7 +1492,6 @@ void transport_init_se_cmd( { INIT_LIST_HEAD(&cmd->se_lun_node); INIT_LIST_HEAD(&cmd->se_delayed_node); - INIT_LIST_HEAD(&cmd->se_ordered_node); INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->se_queue_node); INIT_LIST_HEAD(&cmd->se_cmd_list); @@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks( pr_err("Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; return -EINVAL; } /* @@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks( " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), (unsigned long)sizeof(cmd->__t_task_cdb)); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return -ENOMEM; } } else @@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct( * and call transport_generic_request_failure() if necessary.. */ ret = transport_generic_new_cmd(cmd); - if (ret < 0) { - cmd->transport_error_status = ret; - transport_generic_request_failure(cmd, 0, - (cmd->data_direction != DMA_TO_DEVICE)); - } + if (ret < 0) + transport_generic_request_failure(cmd); + return 0; } EXPORT_SYMBOL(transport_handle_cdb_direct); @@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) /* * Handle SAM-esque emulation for generic transport request failures. 
*/ -static void transport_generic_request_failure( - struct se_cmd *cmd, - int complete, - int sc) +static void transport_generic_request_failure(struct se_cmd *cmd) { int ret = 0; pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), cmd->t_task_cdb[0]); - pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n", + pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", cmd->se_tfo->get_cmd_state(cmd), - cmd->t_state, - cmd->transport_error_status); + cmd->t_state, cmd->scsi_sense_reason); pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" " t_transport_active: %d t_transport_stop: %d" @@ -1829,46 +1812,19 @@ static void transport_generic_request_failure( if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) transport_complete_task_attr(cmd); - if (complete) { - cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; - } - - switch (cmd->transport_error_status) { - case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: - cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; - break; - case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: - cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; - break; - case PYX_TRANSPORT_INVALID_CDB_FIELD: - cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - break; - case PYX_TRANSPORT_INVALID_PARAMETER_LIST: - cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; - break; - case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: - if (!sc) - transport_new_cmd_failure(cmd); - /* - * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, - * we force this session to fall back to session - * recovery. - */ - cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); - cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); - - goto check_stop; - case PYX_TRANSPORT_LU_COMM_FAILURE: - case PYX_TRANSPORT_ILLEGAL_REQUEST: - cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - break; - case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: - cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; - break; - case PYX_TRANSPORT_WRITE_PROTECTED: - cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + switch (cmd->scsi_sense_reason) { + case TCM_NON_EXISTENT_LUN: + case TCM_UNSUPPORTED_SCSI_OPCODE: + case TCM_INVALID_CDB_FIELD: + case TCM_INVALID_PARAMETER_LIST: + case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: + case TCM_UNKNOWN_MODE_PAGE: + case TCM_WRITE_PROTECTED: + case TCM_CHECK_CONDITION_ABORT_CMD: + case TCM_CHECK_CONDITION_UNIT_ATTENTION: + case TCM_CHECK_CONDITION_NOT_READY: break; - case PYX_TRANSPORT_RESERVATION_CONFLICT: + case TCM_RESERVATION_CONFLICT: /* * No SENSE Data payload for this case, set SCSI Status * and queue the response to $FABRIC_MOD. @@ -1893,15 +1849,9 @@ static void transport_generic_request_failure( if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; goto check_stop; - case PYX_TRANSPORT_USE_SENSE_REASON: - /* - * struct se_cmd->scsi_sense_reason already set - */ - break; default: pr_err("Unknown transport error for CDB 0x%02x: %d\n", - cmd->t_task_cdb[0], - cmd->transport_error_status); + cmd->t_task_cdb[0], cmd->scsi_sense_reason); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; } @@ -1912,14 +1862,10 @@ static void transport_generic_request_failure( * transport_send_check_condition_and_sense() after handling * possible unsoliticied write data payloads. 
*/ - if (!sc && !cmd->se_tfo->new_cmd_map) - transport_new_cmd_failure(cmd); - else { - ret = transport_send_check_condition_and_sense(cmd, - cmd->scsi_sense_reason, 0); - if (ret == -EAGAIN || ret == -ENOMEM) - goto queue_full; - } + ret = transport_send_check_condition_and_sense(cmd, + cmd->scsi_sense_reason, 0); + if (ret == -EAGAIN || ret == -ENOMEM) + goto queue_full; check_stop: transport_lun_remove_cmd(cmd); @@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) * to allow the passed struct se_cmd list of tasks to the front of the list. */ if (cmd->sam_task_attr == MSG_HEAD_TAG) { - atomic_inc(&cmd->se_dev->dev_hoq_count); - smp_mb__after_atomic_inc(); pr_debug("Added HEAD_OF_QUEUE for CDB:" " 0x%02x, se_ordered_id: %u\n", cmd->t_task_cdb[0], cmd->se_ordered_id); return 1; } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { - spin_lock(&cmd->se_dev->ordered_cmd_lock); - list_add_tail(&cmd->se_ordered_node, - &cmd->se_dev->ordered_cmd_list); - spin_unlock(&cmd->se_dev->ordered_cmd_lock); - atomic_inc(&cmd->se_dev->dev_ordered_sync); smp_mb__after_atomic_inc(); @@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd) { int add_tasks; - if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { - cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; - transport_generic_request_failure(cmd, 0, 1); + if (se_dev_check_online(cmd->se_dev) != 0) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + transport_generic_request_failure(cmd); return 0; } @@ -2163,14 +2102,13 @@ check_depth: else error = dev->transport->do_task(task); if (error != 0) { - cmd->transport_error_status = error; spin_lock_irqsave(&cmd->t_state_lock, flags); task->task_flags &= ~TF_ACTIVE; spin_unlock_irqrestore(&cmd->t_state_lock, flags); atomic_set(&cmd->t_transport_sent, 0); transport_stop_tasks_for_cmd(cmd); atomic_inc(&dev->depth_left); - transport_generic_request_failure(cmd, 0, 1); + transport_generic_request_failure(cmd); } goto check_depth; @@ -2178,19 +2116,6 @@ check_depth: return 0; } -void transport_new_cmd_failure(struct se_cmd *se_cmd) -{ - unsigned long flags; - /* - * Any unsolicited data will get dumped for failed command inside of - * the fabric plugin - */ - spin_lock_irqsave(&se_cmd->t_state_lock, flags); - se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); -} - static inline u32 transport_get_sectors_6( unsigned char *cdb, struct se_cmd *cmd, @@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6( /* * Everything else assume TYPE_DISK Sector CDB location. - * Use 8-bit sector value. + * Use 8-bit sector value. SBC-3 says: + * + * A TRANSFER LENGTH field set to zero specifies that 256 + * logical blocks shall be written. Any other value + * specifies the number of logical blocks that shall be + * written. */ type_disk: - return (u32)cdb[4]; + return cdb[4] ? : 256; } static inline u32 transport_get_sectors_10( @@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) return -1; } -static int -transport_handle_reservation_conflict(struct se_cmd *cmd) -{ - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; - cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; - /* - * For UA Interlock Code 11b, a RESERVATION CONFLICT will - * establish a UNIT ATTENTION with PREVIOUS RESERVATION - * CONFLICT STATUS. 
- * - * See spc4r17, section 7.4.6 Control Mode Page, Table 349 - */ - if (cmd->se_sess && - cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) - core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, - cmd->orig_fe_lun, 0x2C, - ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - return -EINVAL; -} - static inline long long transport_dev_end_lba(struct se_device *dev) { return dev->transport->get_blocks(dev) + 1; @@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer( */ if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( - cmd, cdb, pr_reg_type) != 0) - return transport_handle_reservation_conflict(cmd); + cmd, cdb, pr_reg_type) != 0) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EBUSY; + } /* * This means the CDB is allowed for the SCSI Initiator port * when said port is *NOT* holding the legacy SPC-2 or @@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->t_task_lba = transport_lba_32(cdb); - cmd->t_tasks_fua = (cdb[1] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_12: @@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->t_task_lba = transport_lba_32(cdb); - cmd->t_tasks_fua = (cdb[1] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_16: @@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->t_task_lba = transport_lba_64(cdb); - cmd->t_tasks_fua = (cdb[1] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case XDWRITEREAD_10: if ((cmd->data_direction != DMA_TO_DEVICE) || - !(cmd->t_tasks_bidi)) + !(cmd->se_cmd_flags & SCF_BIDI)) goto out_invalid_cdb_field; sectors = transport_get_sectors_10(cdb, cmd, §or_ret); if (sector_ret) @@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer( * Setup BIDI XOR callback to be run after I/O completion. */ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_tasks_fua = (cdb[1] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; break; case VARIABLE_LENGTH_CMD: service_action = get_unaligned_be16(&cdb[8]); @@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer( * completion. 
*/ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_tasks_fua = (cdb[10] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; break; case WRITE_SAME_32: sectors = transport_get_sectors_32(cdb, cmd, §or_ret); @@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd) " SIMPLE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { - atomic_dec(&dev->dev_hoq_count); - smp_mb__after_atomic_dec(); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for" " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { - spin_lock(&dev->ordered_cmd_lock); - list_del(&cmd->se_ordered_node); atomic_dec(&dev->dev_ordered_sync); smp_mb__after_atomic_dec(); - spin_unlock(&dev->ordered_cmd_lock); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" @@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd( if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { + /* + * Reject SCSI data overflow with map_mem_to_cmd() as incoming + * scatterlists already have been set to follow what the fabric + * passes for the original expected data transfer length. + */ + if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + pr_warn("Rejecting SCSI DATA overflow for fabric using" + " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; + } cmd->t_data_sg = sgl; cmd->t_data_nents = sgl_count; @@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd) cmd->data_length) { ret = transport_generic_get_mem(cmd); if (ret < 0) - return ret; + goto out_fail; } /* @@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd) task_cdbs = transport_allocate_control_task(cmd); } - if (task_cdbs <= 0) + if (task_cdbs < 0) goto out_fail; + else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { + cmd->t_state = TRANSPORT_COMPLETE; + atomic_set(&cmd->t_transport_active, 1); + INIT_WORK(&cmd->work, target_complete_ok_work); + queue_work(target_completion_wq, &cmd->work); + return 0; + } if (set_counts) { atomic_inc(&cmd->t_fe_count); @@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd) else if (ret < 0) return ret; - return PYX_TRANSPORT_WRITE_PENDING; + return 1; queue_full: pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); @@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd) if (cmd->se_tfo->write_pending_status(cmd) != 0) { atomic_inc(&cmd->t_transport_aborted); smp_mb__after_atomic_inc(); - cmd->scsi_status = SAM_STAT_TASK_ABORTED; - transport_new_cmd_failure(cmd); - return; } } cmd->scsi_status = SAM_STAT_TASK_ABORTED; @@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param) struct se_cmd *cmd; struct se_device *dev = (struct se_device *) param; - set_user_nice(current, -20); - while (!kthread_should_stop()) { ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, atomic_read(&dev->dev_queue_obj.queue_cnt) || @@ -4698,18 +4625,13 @@ get_cmd: } ret = cmd->se_tfo->new_cmd_map(cmd); if (ret < 0) { - cmd->transport_error_status = ret; - transport_generic_request_failure(cmd, - 0, (cmd->data_direction != - DMA_TO_DEVICE)); + transport_generic_request_failure(cmd); break; } ret = transport_generic_new_cmd(cmd); if (ret < 0) { - cmd->transport_error_status = 
ret; - transport_generic_request_failure(cmd, - 0, (cmd->data_direction != - DMA_TO_DEVICE)); + transport_generic_request_failure(cmd); + break; } break; case TRANSPORT_PROCESS_WRITE: diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 4fac37c4c615..71fc9cea5dc9 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd) lport = ep->lp; fp = fc_frame_alloc(lport, sizeof(*txrdy)); if (!fp) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + return -ENOMEM; /* Signal QUEUE_FULL */ txrdy = fc_frame_payload_get(fp, sizeof(*txrdy)); memset(txrdy, 0, sizeof(*txrdy)); diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 5f770412ca40..9402b7387cac 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn) struct ft_lport_acl *lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn); - pr_debug("del lport %s\n", - config_item_name(&wwn->wwn_group.cg_item)); + pr_debug("del lport %s\n", lacl->name); mutex_lock(&ft_lport_lock); list_del(&lacl->list); mutex_unlock(&ft_lport_lock); diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c index 4730016d7cd4..45f422ac103f 100644 --- a/drivers/usb/gadget/amd5536udc.c +++ b/drivers/usb/gadget/amd5536udc.c @@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver, u32 tmp; if (!driver || !bind || !driver->setup - || driver->speed != USB_SPEED_HIGH) + || driver->speed < USB_SPEED_HIGH) return -EINVAL; if (!dev) return -ENODEV; diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c index 91fdf790ed20..cf33a8d0fd5d 100644 --- a/drivers/usb/gadget/f_serial.c +++ b/drivers/usb/gadget/f_serial.c @@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt) } if (!gser->port.in->desc || !gser->port.out->desc) { DBG(cdev, "activate generic ttyGS%d\n", gser->port_num); - if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) || - !config_ep_by_speed(cdev->gadget, f, gser->port.out)) { + if (config_ep_by_speed(cdev->gadget, f, gser->port.in) || + config_ep_by_speed(cdev->gadget, f, gser->port.out)) { gser->port.in->desc = NULL; gser->port.out->desc = NULL; return -EINVAL; diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c index 43a49ecc1f36..dcbc0a2e48dd 100644 --- a/drivers/usb/gadget/fsl_mxc_udc.c +++ b/drivers/usb/gadget/fsl_mxc_udc.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/fsl_devices.h> #include <linux/platform_device.h> +#include <linux/io.h> #include <mach/hardware.h> @@ -88,7 +89,6 @@ eenahb: void fsl_udc_clk_finalize(struct platform_device *pdev) { struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; -#if defined(CONFIG_SOC_IMX35) if (cpu_is_mx35()) { unsigned int v; @@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev) USBPHYCTRL_OTGBASE_OFFSET)); } } -#endif /* ULPI transceivers don't need usbpll */ if (pdata->phy_mode == FSL_USB2_PHY_ULPI) { diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 2a03e4de11c1..e00cf92409ce 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c @@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver, if (!udc_controller) return -ENODEV; - if (!driver || (driver->speed != USB_SPEED_FULL - && driver->speed != USB_SPEED_HIGH) + 
if (!driver || driver->speed < USB_SPEED_FULL || !bind || !driver->disconnect || !driver->setup) return -EINVAL; diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index b3b3d83b7c33..dd28ef3def71 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req) kfree(req); } -/*-------------------------------------------------------------------------*/ +/* Actually add a dTD chain to an empty dQH and let go */ +static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td) +{ + struct ep_queue_head *qh = get_qh_by_ep(ep); + + /* Write dQH next pointer and terminate bit to 0 */ + qh->next_dtd_ptr = cpu_to_hc32(td->td_dma + & EP_QUEUE_HEAD_NEXT_POINTER_MASK); + + /* Clear active and halt bit */ + qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE + | EP_QUEUE_HEAD_STATUS_HALT)); + + /* Ensure that updates to the QH will occur before priming. */ + wmb(); + + /* Prime endpoint by writing correct bit to ENDPTPRIME */ + fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16)) + : (1 << (ep_index(ep))), &dr_regs->endpointprime); +} + +/* Add dTD chain to the dQH of an EP */ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) { - int i = ep_index(ep) * 2 + ep_is_in(ep); u32 temp, bitmask, tmp_stat; - struct ep_queue_head *dQH = &ep->udc->ep_qh[i]; /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr); VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */ @@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK); /* Read prime bit, if 1 goto done */ if (fsl_readl(&dr_regs->endpointprime) & bitmask) - goto out; + return; do { /* Set ATDTW bit in USBCMD */ @@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd); if (tmp_stat) - goto out; + return; } - /* Write dQH next pointer and terminate bit to 0 */ - temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; - dQH->next_dtd_ptr = cpu_to_hc32(temp); - - /* Clear active and halt bit */ - temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE - | EP_QUEUE_HEAD_STATUS_HALT)); - dQH->size_ioc_int_sts &= temp; - - /* Ensure that updates to the QH will occur before priming. */ - wmb(); - - /* Prime endpoint by writing 1 to ENDPTPRIME */ - temp = ep_is_in(ep) - ? 
(1 << (ep_index(ep) + 16)) - : (1 << (ep_index(ep))); - fsl_writel(temp, &dr_regs->endpointprime); -out: - return; + fsl_prime_ep(ep, req->head); } /* Fill in the dTD structure @@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) VDBG("%s, bad ep", __func__); return -EINVAL; } - if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { + if (usb_endpoint_xfer_isoc(ep->desc)) { if (req->req.length > ep->ep.maxpacket) return -EMSGSIZE; } @@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) /* The request isn't the last request in this ep queue */ if (req->queue.next != &ep->queue) { - struct ep_queue_head *qh; struct fsl_req *next_req; - qh = ep->qh; next_req = list_entry(req->queue.next, struct fsl_req, queue); - /* Point the QH to the first TD of next request */ - fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr); + /* prime with dTD of next request */ + fsl_prime_ep(ep, next_req->head); } - - /* The request hasn't been processed, patch up the TD chain */ + /* The request hasn't been processed, patch up the TD chain */ } else { struct fsl_req *prev_req; prev_req = list_entry(req->queue.prev, struct fsl_req, queue); - fsl_writel(fsl_readl(&req->tail->next_td_ptr), - &prev_req->tail->next_td_ptr); - + prev_req->tail->next_td_ptr = req->tail->next_td_ptr; } done(ep, req, -ECONNRESET); @@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value) goto out; } - if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { + if (usb_endpoint_xfer_isoc(ep->desc)) { status = -EOPNOTSUPP; goto out; } @@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep) struct fsl_udc *udc; int size = 0; u32 bitmask; - struct ep_queue_head *d_qh; + struct ep_queue_head *qh; ep = container_of(_ep, struct fsl_ep, ep); if (!_ep || (!ep->desc && ep_index(ep) != 0)) @@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep) if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; - d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)]; + qh = get_qh_by_ep(ep); bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) : (1 << (ep_index(ep))); if (fsl_readl(&dr_regs->endptstatus) & bitmask) - size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE) + size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS; pr_debug("%s %u\n", __func__, size); @@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver, if (!udc_controller) return -ENODEV; - if (!driver || (driver->speed != USB_SPEED_FULL - && driver->speed != USB_SPEED_HIGH) + if (!driver || driver->speed < USB_SPEED_FULL || !bind || !driver->disconnect || !driver->setup) return -EINVAL; diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h index 1d51be83fda8..f781f5dec417 100644 --- a/drivers/usb/gadget/fsl_usb2_udc.h +++ b/drivers/usb/gadget/fsl_usb2_udc.h @@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length) * 2 + ((windex & USB_DIR_IN) ? 1 : 0)) #define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP)) +static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep) +{ + /* we only have one ep0 structure but two queue heads */ + if (ep_index(ep) != 0) + return ep->qh; + else + return &ep->udc->ep_qh[(ep->udc->ep0_dir == + USB_DIR_IN) ? 
1 : 0]; +} + struct platform_device; #ifdef CONFIG_ARCH_MXC int fsl_udc_clk_init(struct platform_device *pdev); diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 91d0af2a24a8..9aa1cbbee45b 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c @@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver, int retval; if (!driver - || driver->speed != USB_SPEED_HIGH + || driver->speed < USB_SPEED_HIGH || !bind || !driver->setup) return -EINVAL; diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index 7f1bc9a73cda..da2b9d0be3ca 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c @@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget, * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE) * "must not be used in normal operation" */ - if (!driver || driver->speed != USB_SPEED_HIGH + if (!driver || driver->speed < USB_SPEED_HIGH || !driver->setup) return -EINVAL; diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 24f84b210ce1..fc719a3f8557 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c @@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget, struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget); if (!driver - || driver->speed != USB_SPEED_HIGH + || driver->speed < USB_SPEED_HIGH || !driver->setup) return -EINVAL; if (!r8a66597) diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index a552453dc946..b31448229f0b 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c @@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver, return -EINVAL; } - if (driver->speed != USB_SPEED_HIGH && - driver->speed != USB_SPEED_FULL) { + if (driver->speed < USB_SPEED_FULL) dev_err(hsotg->dev, "%s: bad speed\n", __func__); - } if (!bind || !driver->setup) { dev_err(hsotg->dev, "%s: missing entry points\n", __func__); diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c index 8d54f893cefe..20a553b46aed 100644 --- a/drivers/usb/gadget/s3c-hsudc.c +++ b/drivers/usb/gadget/s3c-hsudc.c @@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver, int ret; if (!driver - || (driver->speed != USB_SPEED_FULL && - driver->speed != USB_SPEED_HIGH) + || driver->speed < USB_SPEED_FULL || !bind || !driver->unbind || !driver->disconnect || !driver->setup) return -EINVAL; diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 56a32033adb3..a60679cbbf85 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1475,6 +1475,7 @@ iso_stream_schedule ( * jump until after the queue is primed. 
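The ehci-sched.c hunk just below rewrites the isochronous slot search so that success is recorded in an explicit done flag instead of being inferred from the loop variable; with the old code, a usable slot found on the very last candidate left start equal to next, and the follow-up start == next test reported a full schedule anyway. A rough, stand-alone sketch of that pattern (find_slot and slot_ok are made-up names, not EHCI code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Search candidate slots from just below 'start' down to 'next' inclusive.
 * Success is reported through *found rather than deduced from the final
 * value of 'start', so a hit on the last candidate is not confused with
 * "no room in the schedule".
 */
static int find_slot(const bool *slot_ok, int start, int next, bool *found)
{
    bool done = false;

    do {
        start--;
        if (slot_ok[start])
            done = true;
    } while (start > next && !done);

    *found = done;
    return start;
}

int main(void)
{
    bool slot_ok[8] = { false };
    bool found;
    int next = 2, start;

    slot_ok[2] = true;  /* only the last candidate examined is free */
    start = find_slot(slot_ok, 8, next, &found);

    printf("found=%d at slot %d\n", found, start);
    /* An old-style "start == next" failure test would misfire here: */
    printf("start == next: %d\n", start == next);
    return 0;
}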
*/ else { + int done = 0; start = SCHEDULE_SLOP + (now & ~0x07); /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ @@ -1492,18 +1493,18 @@ iso_stream_schedule ( if (stream->highspeed) { if (itd_slot_ok(ehci, mod, start, stream->usecs, period)) - break; + done = 1; } else { if ((start % 8) >= 6) continue; if (sitd_slot_ok(ehci, mod, stream, start, sched, period)) - break; + done = 1; } - } while (start > next); + } while (start > next && !done); /* no room in the schedule */ - if (start == next) { + if (!done) { ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n", urb, now, now + mod); status = -ENOSPC; diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index d6e175428618..a403b53e86b9 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c @@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset) { qset->td_start = qset->td_end = qset->ntds = 0; - qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T); + qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; qset->qh.err_count = 0; qset->qh.scratch[0] = 0; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index aa94c0195791..a1afb7c39f7e 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) ring = xhci->cmd_ring; seg = ring->deq_seg; do { - memset(seg->trbs, 0, SEGMENT_SIZE); + memset(seg->trbs, 0, + sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); + seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= + cpu_to_le32(~TRB_CYCLE); seg = seg->next; } while (seg != ring->deq_seg); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index c1fa12ec7a9a..b63ab1570103 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2301,18 +2301,12 @@ static int musb_suspend(struct device *dev) */ } - musb_save_context(musb); - spin_unlock_irqrestore(&musb->lock, flags); return 0; } static int musb_resume_noirq(struct device *dev) { - struct musb *musb = dev_to_musb(dev); - - musb_restore_context(musb); - /* for static cmos like DaVinci, register values were preserved * unless for some reason the whole soc powered down or the USB * module got reset through the PSC (vs just being disabled). diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index d51043acfe1a..922148ff8d29 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g, unsigned long flags; int retval = -EINVAL; - if (driver->speed != USB_SPEED_HIGH) + if (driver->speed < USB_SPEED_HIGH) goto err0; pm_runtime_get_sync(musb->controller); diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index d9717e0bc1ff..7f4e80338570 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); - struct usbhs_priv *priv; - struct device *dev; - int ret; + struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); if (!driver || !driver->setup || - driver->speed != USB_SPEED_HIGH) + driver->speed < USB_SPEED_FULL) return -EINVAL; - dev = usbhsg_gpriv_to_dev(gpriv); - priv = usbhsg_gpriv_to_priv(gpriv); - /* first hook up the driver ... 
*/ gpriv->driver = driver; gpriv->gadget.dev.driver = &driver->driver; - ret = device_add(&gpriv->gadget.dev); - if (ret) { - dev_err(dev, "device_add error %d\n", ret); - goto add_fail; - } - return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD); - -add_fail: - gpriv->driver = NULL; - gpriv->gadget.dev.driver = NULL; - - return ret; } static int usbhsg_gadget_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); - struct usbhs_priv *priv; - struct device *dev; + struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); if (!driver || !driver->unbind) return -EINVAL; - dev = usbhsg_gpriv_to_dev(gpriv); - priv = usbhsg_gpriv_to_priv(gpriv); - usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); - device_del(&gpriv->gadget.dev); + gpriv->gadget.dev.driver = NULL; gpriv->driver = NULL; return 0; @@ -827,6 +806,13 @@ static int usbhsg_start(struct usbhs_priv *priv) static int usbhsg_stop(struct usbhs_priv *priv) { + struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); + + /* cable disconnect */ + if (gpriv->driver && + gpriv->driver->disconnect) + gpriv->driver->disconnect(&gpriv->gadget); + return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); } @@ -876,12 +862,14 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) /* * init gadget */ - device_initialize(&gpriv->gadget.dev); dev_set_name(&gpriv->gadget.dev, "gadget"); gpriv->gadget.dev.parent = dev; gpriv->gadget.name = "renesas_usbhs_udc"; gpriv->gadget.ops = &usbhsg_gadget_ops; gpriv->gadget.is_dualspeed = 1; + ret = device_register(&gpriv->gadget.dev); + if (ret < 0) + goto err_add_udc; INIT_LIST_HEAD(&gpriv->gadget.ep_list); @@ -912,12 +900,15 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) ret = usb_add_gadget_udc(dev, &gpriv->gadget); if (ret) - goto err_add_udc; + goto err_register; dev_info(dev, "gadget probed\n"); return 0; + +err_register: + device_unregister(&gpriv->gadget.dev); err_add_udc: kfree(gpriv->uep); @@ -933,6 +924,8 @@ void usbhs_mod_gadget_remove(struct usbhs_priv *priv) usb_del_gadget_udc(&gpriv->gadget); + device_unregister(&gpriv->gadget.dev); + usbhsg_controller_unregister(gpriv); kfree(gpriv->uep); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index bd4298bb6750..ff3db5d056a5 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 571fa96b49c7..055b64ef0bba 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -112,6 +112,7 @@ /* Propox devices */ #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 +#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739 /* Lenz LI-USB Computer Interface. 
*/ #define FTDI_LENZ_LIUSB_PID 0xD780 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index d865878c9f97..e3426602dc82 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -661,6 +661,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x08) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, @@ -747,6 +750,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 3041a974faf3..24caba79d722 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Reported by Qinglin Ye <yestyle@gmail.com> */ +UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100, + "Kingston", + "DT 101 G2", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BULK_IGNORE_TAG ), + /* Reported by Francesco Foresti <frafore@tiscali.it> */ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, "Super Top", diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f0d5718d2587..2ad813674d77 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5107,11 +5107,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root = orig_root->fs_info->extent_root; struct btrfs_free_cluster *last_ptr = NULL; struct btrfs_block_group_cache *block_group = NULL; + struct btrfs_block_group_cache *used_block_group; int empty_cluster = 2 * 1024 * 1024; int allowed_chunk_alloc = 0; int done_chunk_alloc = 0; struct btrfs_space_info *space_info; - int last_ptr_loop = 0; int loop = 0; int index = 0; int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ? @@ -5173,6 +5173,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, ideal_cache: block_group = btrfs_lookup_block_group(root->fs_info, search_start); + used_block_group = block_group; /* * we don't want to use the block group if it doesn't match our * allocation bits, or if its not cached. 
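The find_free_extent hunks that follow thread a separate used_block_group pointer through the btrfs allocator so that, when a request is satisfied from the cluster's block group rather than the group currently being scanned, the extra reference, the free-space returns and the final put all go to the group that actually served the allocation. A small stand-alone sketch of that get/borrow/put discipline with a toy reference count (struct group, group_get/group_put and alloc_one are illustrative names, not btrfs API):

#include <assert.h>
#include <stdio.h>

/* Toy refcounted object standing in for a block group. */
struct group {
    const char *name;
    int refs;
};

static void group_get(struct group *g) { g->refs++; }
static void group_put(struct group *g) { assert(g->refs > 0); g->refs--; }

/*
 * Allocate from 'scanned', but possibly borrow the cluster's group instead.
 * Whichever group actually serves the request is the one that gets pinned
 * and later released, mirroring the used_block_group bookkeeping below.
 */
static void alloc_one(struct group *scanned, struct group *cluster_group)
{
    struct group *used = scanned;

    if (cluster_group && cluster_group != scanned) {
        used = cluster_group;
        group_get(used);        /* extra ref while we borrow it */
    }

    printf("allocated from %s\n", used->name);

    if (used != scanned)
        group_put(used);        /* drop the borrowed reference */
}

int main(void)
{
    struct group a = { "scanned", 1 }, b = { "cluster", 1 };

    alloc_one(&a, &b);
    alloc_one(&a, NULL);
    printf("refs after: %s=%d %s=%d\n", a.name, a.refs, b.name, b.refs);
    return 0;
}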
@@ -5210,6 +5211,7 @@ search: u64 offset; int cached; + used_block_group = block_group; btrfs_get_block_group(block_group); search_start = block_group->key.objectid; @@ -5286,71 +5288,62 @@ alloc: spin_unlock(&block_group->free_space_ctl->tree_lock); /* - * Ok we want to try and use the cluster allocator, so lets look - * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will - * have tried the cluster allocator plenty of times at this - * point and not have found anything, so we are likely way too - * fragmented for the clustering stuff to find anything, so lets - * just skip it and let the allocator find whatever block it can - * find + * Ok we want to try and use the cluster allocator, so + * lets look there */ - if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) { + if (last_ptr) { /* * the refill lock keeps out other * people trying to start a new cluster */ spin_lock(&last_ptr->refill_lock); - if (!last_ptr->block_group || - last_ptr->block_group->ro || - !block_group_bits(last_ptr->block_group, data)) + used_block_group = last_ptr->block_group; + if (used_block_group != block_group && + (!used_block_group || + used_block_group->ro || + !block_group_bits(used_block_group, data))) { + used_block_group = block_group; goto refill_cluster; + } + + if (used_block_group != block_group) + btrfs_get_block_group(used_block_group); - offset = btrfs_alloc_from_cluster(block_group, last_ptr, - num_bytes, search_start); + offset = btrfs_alloc_from_cluster(used_block_group, + last_ptr, num_bytes, used_block_group->key.objectid); if (offset) { /* we have a block, we're done */ spin_unlock(&last_ptr->refill_lock); goto checks; } - spin_lock(&last_ptr->lock); - /* - * whoops, this cluster doesn't actually point to - * this block group. Get a ref on the block - * group is does point to and try again - */ - if (!last_ptr_loop && last_ptr->block_group && - last_ptr->block_group != block_group && - index <= - get_block_group_index(last_ptr->block_group)) { - - btrfs_put_block_group(block_group); - block_group = last_ptr->block_group; - btrfs_get_block_group(block_group); - spin_unlock(&last_ptr->lock); - spin_unlock(&last_ptr->refill_lock); - - last_ptr_loop = 1; - search_start = block_group->key.objectid; - /* - * we know this block group is properly - * in the list because - * btrfs_remove_block_group, drops the - * cluster before it removes the block - * group from the list - */ - goto have_block_group; + WARN_ON(last_ptr->block_group != used_block_group); + if (used_block_group != block_group) { + btrfs_put_block_group(used_block_group); + used_block_group = block_group; } - spin_unlock(&last_ptr->lock); refill_cluster: + BUG_ON(used_block_group != block_group); + /* If we are on LOOP_NO_EMPTY_SIZE, we can't + * set up a new clusters, so lets just skip it + * and let the allocator find whatever block + * it can find. If we reach this point, we + * will have tried the cluster allocator + * plenty of times and not have found + * anything, so we are likely way too + * fragmented for the clustering stuff to find + * anything. 
*/ + if (loop >= LOOP_NO_EMPTY_SIZE) { + spin_unlock(&last_ptr->refill_lock); + goto unclustered_alloc; + } + /* * this cluster didn't work out, free it and * start over */ btrfs_return_cluster_to_free_space(NULL, last_ptr); - last_ptr_loop = 0; - /* allocate a cluster in this block group */ ret = btrfs_find_space_cluster(trans, root, block_group, last_ptr, @@ -5390,6 +5383,7 @@ refill_cluster: goto loop; } +unclustered_alloc: offset = btrfs_find_space_for_alloc(block_group, search_start, num_bytes, empty_size); /* @@ -5416,14 +5410,14 @@ checks: search_start = stripe_align(root, offset); /* move on to the next group */ if (search_start + num_bytes >= search_end) { - btrfs_add_free_space(block_group, offset, num_bytes); + btrfs_add_free_space(used_block_group, offset, num_bytes); goto loop; } /* move on to the next group */ if (search_start + num_bytes > - block_group->key.objectid + block_group->key.offset) { - btrfs_add_free_space(block_group, offset, num_bytes); + used_block_group->key.objectid + used_block_group->key.offset) { + btrfs_add_free_space(used_block_group, offset, num_bytes); goto loop; } @@ -5431,14 +5425,14 @@ checks: ins->offset = num_bytes; if (offset < search_start) - btrfs_add_free_space(block_group, offset, + btrfs_add_free_space(used_block_group, offset, search_start - offset); BUG_ON(offset > search_start); - ret = btrfs_update_reserved_bytes(block_group, num_bytes, + ret = btrfs_update_reserved_bytes(used_block_group, num_bytes, alloc_type); if (ret == -EAGAIN) { - btrfs_add_free_space(block_group, offset, num_bytes); + btrfs_add_free_space(used_block_group, offset, num_bytes); goto loop; } @@ -5447,15 +5441,19 @@ checks: ins->offset = num_bytes; if (offset < search_start) - btrfs_add_free_space(block_group, offset, + btrfs_add_free_space(used_block_group, offset, search_start - offset); BUG_ON(offset > search_start); + if (used_block_group != block_group) + btrfs_put_block_group(used_block_group); btrfs_put_block_group(block_group); break; loop: failed_cluster_refill = false; failed_alloc = false; BUG_ON(index != get_block_group_index(block_group)); + if (used_block_group != block_group) + btrfs_put_block_group(used_block_group); btrfs_put_block_group(block_group); } up_read(&space_info->groups_sem); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index be1bf627a14b..49f3c9dc09f4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -935,8 +935,10 @@ again: node = tree_search(tree, start); if (!node) { prealloc = alloc_extent_state_atomic(prealloc); - if (!prealloc) - return -ENOMEM; + if (!prealloc) { + err = -ENOMEM; + goto out; + } err = insert_state(tree, prealloc, start, end, &bits); prealloc = NULL; BUG_ON(err == -EEXIST); @@ -992,8 +994,10 @@ hit_next: */ if (state->start < start) { prealloc = alloc_extent_state_atomic(prealloc); - if (!prealloc) - return -ENOMEM; + if (!prealloc) { + err = -ENOMEM; + goto out; + } err = split_state(tree, state, prealloc, start); BUG_ON(err == -EEXIST); prealloc = NULL; @@ -1024,8 +1028,10 @@ hit_next: this_end = last_start - 1; prealloc = alloc_extent_state_atomic(prealloc); - if (!prealloc) - return -ENOMEM; + if (!prealloc) { + err = -ENOMEM; + goto out; + } /* * Avoid to free 'prealloc' if it can be merged with @@ -1051,8 +1057,10 @@ hit_next: */ if (state->start <= end && state->end > end) { prealloc = alloc_extent_state_atomic(prealloc); - if (!prealloc) - return -ENOMEM; + if (!prealloc) { + err = -ENOMEM; + goto out; + } err = split_state(tree, state, prealloc, end + 1); BUG_ON(err == -EEXIST); 
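The extent_io.c hunks above stop returning -ENOMEM from the middle of the function when alloc_extent_state_atomic() fails and instead record the error and jump to the common out: label, so whatever unlocking and freeing that exit path performs still happens. A generic stand-alone illustration of that error-handling shape (do_work and its buffer are made-up stand-ins, not btrfs code):

#include <stdio.h>
#include <stdlib.h>

/*
 * Shape of the fix above: don't return an error while resources are held;
 * record it and fall through to a single cleanup path instead.
 */
static int do_work(size_t n)
{
    int err = 0;
    char *buf = malloc(64);     /* stands in for held locks / preallocations */

    if (!buf)
        return -1;

    if (n > 64) {
        err = -1;               /* e.g. -ENOMEM in the original */
        goto out;               /* cleanup still runs */
    }

    printf("worked on %zu bytes\n", n);
out:
    free(buf);                  /* the part a bare early return would skip */
    return err;
}

int main(void)
{
    printf("%d\n", do_work(16));
    printf("%d\n", do_work(128));
    return 0;
}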
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c37433d3cd82..0a8c8f8304b1 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1611,7 +1611,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) return -EINVAL; - bdev = blkdev_get_by_path(device_path, FMODE_EXCL, + bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, root->fs_info->bdev_holder); if (IS_ERR(bdev)) return PTR_ERR(bdev); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d6a972df0338..8cd4b52d4217 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig, smb_msg.msg_controllen = 0; for (total_read = 0; to_read; total_read += length, to_read -= length) { + try_to_freeze(); + if (server_unresponsive(server)) { total_read = -EAGAIN; break; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index cf0b1539b321..4dd9283885e7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock, lock->type, lock->netfid, conf_lock); } +/* + * Check if there is another lock that prevents us to set the lock (mandatory + * style). If such a lock exists, update the flock structure with its + * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks + * or leave it the same if we can't. Returns 0 if we don't need to request to + * the server or 1 otherwise. + */ static int cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length, __u8 type, __u16 netfid, struct file_lock *flock) @@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock) mutex_unlock(&cinode->lock_mutex); } +/* + * Set the byte-range lock (mandatory style). Returns: + * 1) 0, if we set the lock and don't need to request to the server; + * 2) 1, if no locks prevent us but we need to request to the server; + * 3) -EACCESS, if there is a lock that prevents us and wait is false. + */ static int cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock, bool wait) @@ -778,6 +791,13 @@ try_again: return rc; } +/* + * Check if there is another lock that prevents us to set the lock (posix + * style). If such a lock exists, update the flock structure with its + * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks + * or leave it the same if we can't. Returns 0 if we don't need to request to + * the server or 1 otherwise. + */ static int cifs_posix_lock_test(struct file *file, struct file_lock *flock) { @@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock) return rc; } +/* + * Set the byte-range lock (posix style). Returns: + * 1) 0, if we set the lock and don't need to request to the server; + * 2) 1, if we need to request to the server; + * 3) <0, if the error occurs while setting the lock. 
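The comments added above document a recurring return convention in the cifs byte-range locking helpers: 0 means the lock was handled locally, 1 means the server must still be asked, and a negative value is an error. A hypothetical caller showing how that tri-state result is meant to be branched on (lock_set_local and its parameters are invented for illustration; this is not the cifs code):

#include <stdio.h>

/* 0: handled locally, 1: must ask the server, <0: hard error. */
static int lock_set_local(int conflict, int can_cache)
{
    if (conflict)
        return -1;              /* an existing lock prevents ours */
    return can_cache ? 0 : 1;   /* cached locally, or defer to the server */
}

int main(void)
{
    struct { int conflict, can_cache; } cases[] = {
        { 0, 1 }, { 0, 0 }, { 1, 0 },
    };

    for (int i = 0; i < 3; i++) {
        int rc = lock_set_local(cases[i].conflict, cases[i].can_cache);

        if (rc < 0)
            printf("case %d: error, give up\n", i);
        else if (rc == 0)
            printf("case %d: handled locally, no server round trip\n", i);
        else
            printf("case %d: send the request to the server\n", i);
    }
    return 0;
}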
+ */ static int cifs_posix_lock_set(struct file *file, struct file_lock *flock) { diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 5de03ec20144..a090bbe6ee29 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon, rc); return rc; } - cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); + /* FindFirst/Next set last_entry to NULL on malformed reply */ + if (cifsFile->srch_inf.last_entry) + cifs_save_resume_key(cifsFile->srch_inf.last_entry, + cifsFile); } while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && @@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon, cFYI(1, "calling findnext2"); rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, &cifsFile->srch_inf); - cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile); + /* FindFirst/Next set last_entry to NULL on malformed reply */ + if (cifsFile->srch_inf.last_entry) + cifs_save_resume_key(cifsFile->srch_inf.last_entry, + cifsFile); if (rc) return -ENOENT; } diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 7cacba12b8f1..80d850881938 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c @@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16, { int rc; int len; - __u16 wpwd[129]; + __le16 wpwd[129]; /* Password cannot be longer than 128 characters */ if (passwd) /* Password must be converted to NT unicode */ @@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16, *wpwd = 0; /* Ensure string is null terminated */ } - rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16)); - memset(wpwd, 0, 129 * sizeof(__u16)); + rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); + memset(wpwd, 0, 129 * sizeof(__le16)); return rc; } diff --git a/fs/dcache.c b/fs/dcache.c index 10ba92def3f6..89509b5a090e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2439,16 +2439,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name) /** * prepend_path - Prepend path string to a buffer * @path: the dentry/vfsmount to report - * @root: root vfsmnt/dentry (may be modified by this function) + * @root: root vfsmnt/dentry * @buffer: pointer to the end of the buffer * @buflen: pointer to buffer length * * Caller holds the rename_lock. - * - * If path is not reachable from the supplied root, then the value of - * root is changed (without modifying refcounts). */ -static int prepend_path(const struct path *path, struct path *root, +static int prepend_path(const struct path *path, + const struct path *root, char **buffer, int *buflen) { struct dentry *dentry = path->dentry; @@ -2483,10 +2481,10 @@ static int prepend_path(const struct path *path, struct path *root, dentry = parent; } -out: if (!error && !slash) error = prepend(buffer, buflen, "/", 1); +out: br_read_unlock(vfsmount_lock); return error; @@ -2500,15 +2498,17 @@ global_root: WARN(1, "Root dentry has weird name <%.*s>\n", (int) dentry->d_name.len, dentry->d_name.name); } - root->mnt = vfsmnt; - root->dentry = dentry; + if (!slash) + error = prepend(buffer, buflen, "/", 1); + if (!error) + error = vfsmnt->mnt_ns ? 1 : 2; goto out; } /** * __d_path - return the path of a dentry * @path: the dentry/vfsmount to report - * @root: root vfsmnt/dentry (may be modified by this function) + * @root: root vfsmnt/dentry * @buf: buffer to return value in * @buflen: buffer length * @@ -2519,10 +2519,10 @@ global_root: * * "buflen" should be positive. 
* - * If path is not reachable from the supplied root, then the value of - * root is changed (without modifying refcounts). + * If the path is not reachable from the supplied root, return %NULL. */ -char *__d_path(const struct path *path, struct path *root, +char *__d_path(const struct path *path, + const struct path *root, char *buf, int buflen) { char *res = buf + buflen; @@ -2533,7 +2533,28 @@ char *__d_path(const struct path *path, struct path *root, error = prepend_path(path, root, &res, &buflen); write_sequnlock(&rename_lock); - if (error) + if (error < 0) + return ERR_PTR(error); + if (error > 0) + return NULL; + return res; +} + +char *d_absolute_path(const struct path *path, + char *buf, int buflen) +{ + struct path root = {}; + char *res = buf + buflen; + int error; + + prepend(&res, &buflen, "\0", 1); + write_seqlock(&rename_lock); + error = prepend_path(path, &root, &res, &buflen); + write_sequnlock(&rename_lock); + + if (error > 1) + error = -EINVAL; + if (error < 0) return ERR_PTR(error); return res; } @@ -2541,8 +2562,9 @@ char *__d_path(const struct path *path, struct path *root, /* * same as __d_path but appends "(deleted)" for unlinked files. */ -static int path_with_deleted(const struct path *path, struct path *root, - char **buf, int *buflen) +static int path_with_deleted(const struct path *path, + const struct path *root, + char **buf, int *buflen) { prepend(buf, buflen, "\0", 1); if (d_unlinked(path->dentry)) { @@ -2579,7 +2601,6 @@ char *d_path(const struct path *path, char *buf, int buflen) { char *res = buf + buflen; struct path root; - struct path tmp; int error; /* @@ -2594,9 +2615,8 @@ char *d_path(const struct path *path, char *buf, int buflen) get_fs_root(current->fs, &root); write_seqlock(&rename_lock); - tmp = root; - error = path_with_deleted(path, &tmp, &res, &buflen); - if (error) + error = path_with_deleted(path, &root, &res, &buflen); + if (error < 0) res = ERR_PTR(error); write_sequnlock(&rename_lock); path_put(&root); @@ -2617,7 +2637,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) { char *res = buf + buflen; struct path root; - struct path tmp; int error; if (path->dentry->d_op && path->dentry->d_op->d_dname) @@ -2625,9 +2644,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) get_fs_root(current->fs, &root); write_seqlock(&rename_lock); - tmp = root; - error = path_with_deleted(path, &tmp, &res, &buflen); - if (!error && !path_equal(&tmp, &root)) + error = path_with_deleted(path, &root, &res, &buflen); + if (error > 0) error = prepend_unreachable(&res, &buflen); write_sequnlock(&rename_lock); path_put(&root); @@ -2758,19 +2776,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) write_seqlock(&rename_lock); if (!d_unlinked(pwd.dentry)) { unsigned long len; - struct path tmp = root; char *cwd = page + PAGE_SIZE; int buflen = PAGE_SIZE; prepend(&cwd, &buflen, "\0", 1); - error = prepend_path(&pwd, &tmp, &cwd, &buflen); + error = prepend_path(&pwd, &root, &cwd, &buflen); write_sequnlock(&rename_lock); - if (error) + if (error < 0) goto out; /* Unreachable from current root */ - if (!path_equal(&tmp, &root)) { + if (error > 0) { error = prepend_unreachable(&cwd, &buflen); if (error) goto out; diff --git a/fs/namespace.c b/fs/namespace.c index 6d3a1963879b..cfc6d4448aa5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v) if (err) goto out; seq_putc(m, ' '); - seq_path_root(m, &mnt_path, &root, " 
\t\n\\"); - if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) { - /* - * Mountpoint is outside root, discard that one. Ugly, - * but less so than trying to do that in iterator in a - * race-free way (due to renames). - */ - return SEQ_SKIP; - } + + /* mountpoints outside of chroot jail will give SEQ_SKIP on this */ + err = seq_path_root(m, &mnt_path, &root, " \t\n\\"); + if (err) + goto out; + seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw"); show_mnt_opts(m, mnt); @@ -2776,3 +2773,8 @@ void kern_unmount(struct vfsmount *mnt) } } EXPORT_SYMBOL(kern_unmount); + +bool our_mnt(struct vfsmount *mnt) +{ + return check_mnt(mnt); +} diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 586174168e2a..80e4645f7990 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v) K(i.freeswap), K(global_page_state(NR_FILE_DIRTY)), K(global_page_state(NR_WRITEBACK)), - K(global_page_state(NR_ANON_PAGES) #ifdef CONFIG_TRANSPARENT_HUGEPAGE + K(global_page_state(NR_ANON_PAGES) + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR + HPAGE_PMD_NR), +#else + K(global_page_state(NR_ANON_PAGES)), #endif - ), K(global_page_state(NR_FILE_MAPPED)), K(global_page_state(NR_SHMEM)), K(global_page_state(NR_SLAB_RECLAIMABLE) + diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 42b274da92c3..2a30d67dd6b8 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu) idle = kstat_cpu(cpu).cpustat.idle; idle = cputime64_add(idle, arch_idle_time(cpu)); } else - idle = usecs_to_cputime(idle_time); + idle = nsecs_to_jiffies64(1000 * idle_time); return idle; } @@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu) /* !NO_HZ so we can rely on cpustat.iowait */ iowait = kstat_cpu(cpu).cpustat.iowait; else - iowait = usecs_to_cputime(iowait_time); + iowait = nsecs_to_jiffies64(1000 * iowait_time); return iowait; } diff --git a/fs/seq_file.c b/fs/seq_file.c index 05d6b0e78c95..dba43c3ea3af 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path); /* * Same as seq_path, but relative to supplied root. - * - * root may be changed, see __d_path(). */ int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc) @@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *p; p = __d_path(path, root, buf, size); + if (!p) + return SEQ_SKIP; res = PTR_ERR(p); if (!IS_ERR(p)) { char *end = mangle_path(buf, p, esc); @@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root, } seq_commit(m, res); - return res < 0 ? res : 0; + return res < 0 && res != -ENAMETOOLONG ? res : 0; } /* diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index b6c4b3795c4a..76e4266d2e7e 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c @@ -42,6 +42,8 @@ xfs_acl_from_disk(struct xfs_acl *aclp) int count, i; count = be32_to_cpu(aclp->acl_cnt); + if (count > XFS_ACL_MAX_ENTRIES) + return ERR_PTR(-EFSCORRUPTED); acl = posix_acl_alloc(count, GFP_KERNEL); if (!acl) diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index d4906e7c9787..c1b55e596551 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags) /* * Query whether the requested number of additional bytes of extended * attribute space will be able to fit inline. 
+ * * Returns zero if not, else the di_forkoff fork offset to be used in the * literal area for attribute data once the new bytes have been added. * @@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) int offset; int minforkoff; /* lower limit on valid forkoff locations */ int maxforkoff; /* upper limit on valid forkoff locations */ - int dsize; + int dsize; xfs_mount_t *mp = dp->i_mount; offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */ @@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) return (offset >= minforkoff) ? minforkoff : 0; } - if (!(mp->m_flags & XFS_MOUNT_ATTR2)) { - if (bytes <= XFS_IFORK_ASIZE(dp)) - return dp->i_d.di_forkoff; + /* + * If the requested numbers of bytes is smaller or equal to the + * current attribute fork size we can always proceed. + * + * Note that if_bytes in the data fork might actually be larger than + * the current data fork size is due to delalloc extents. In that + * case either the extent count will go down when they are converted + * to real extents, or the delalloc conversion will take care of the + * literal area rebalancing. + */ + if (bytes <= XFS_IFORK_ASIZE(dp)) + return dp->i_d.di_forkoff; + + /* + * For attr2 we can try to move the forkoff if there is space in the + * literal area, but for the old format we are done if there is no + * space in the fixed attribute fork. + */ + if (!(mp->m_flags & XFS_MOUNT_ATTR2)) return 0; - } dsize = dp->i_df.if_bytes; - + switch (dp->i_d.di_format) { case XFS_DINODE_FMT_EXTENTS: - /* + /* * If there is no attr fork and the data fork is extents, - * determine if creating the default attr fork will result - * in the extents form migrating to btree. If so, the - * minimum offset only needs to be the space required for + * determine if creating the default attr fork will result + * in the extents form migrating to btree. If so, the + * minimum offset only needs to be the space required for * the btree root. - */ + */ if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > xfs_default_attroffset(dp)) dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS); break; - case XFS_DINODE_FMT_BTREE: /* - * If have data btree then keep forkoff if we have one, - * otherwise we are adding a new attr, so then we set - * minforkoff to where the btree root can finish so we have + * If we have a data btree then keep forkoff if we have one, + * otherwise we are adding a new attr, so then we set + * minforkoff to where the btree root can finish so we have * plenty of room for attrs */ if (dp->i_d.di_forkoff) { - if (offset < dp->i_d.di_forkoff) + if (offset < dp->i_d.di_forkoff) return 0; - else - return dp->i_d.di_forkoff; - } else - dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot); + return dp->i_d.di_forkoff; + } + dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot); break; } - - /* - * A data fork btree root must have space for at least + + /* + * A data fork btree root must have space for at least * MINDBTPTRS key/ptr pairs if the data fork is small or empty. 
*/ minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS)); @@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS); maxforkoff = maxforkoff >> 3; /* rounded down */ - if (offset >= minforkoff && offset < maxforkoff) - return offset; if (offset >= maxforkoff) return maxforkoff; + if (offset >= minforkoff) + return offset; return 0; } diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index c68baeb0974a..d0ab78837057 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -2383,6 +2383,8 @@ xfs_bmap_btalloc( int tryagain; int error; + ASSERT(ap->length); + mp = ap->ip->i_mount; align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; if (unlikely(align)) { @@ -4629,6 +4631,8 @@ xfs_bmapi_allocate( int error; int rt; + ASSERT(bma->length > 0); + rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip); /* @@ -4849,6 +4853,7 @@ xfs_bmapi_write( ASSERT(*nmap <= XFS_BMAP_MAX_NMAP); ASSERT(!(flags & XFS_BMAPI_IGSTATE)); ASSERT(tp != NULL); + ASSERT(len > 0); whichfork = (flags & XFS_BMAPI_ATTRFORK) ? XFS_ATTR_FORK : XFS_DATA_FORK; @@ -4918,9 +4923,22 @@ xfs_bmapi_write( bma.eof = eof; bma.conv = !!(flags & XFS_BMAPI_CONVERT); bma.wasdel = wasdelay; - bma.length = len; bma.offset = bno; + /* + * There's a 32/64 bit type mismatch between the + * allocation length request (which can be 64 bits in + * length) and the bma length request, which is + * xfs_extlen_t and therefore 32 bits. Hence we have to + * check for 32-bit overflows and handle them here. + */ + if (len > (xfs_filblks_t)MAXEXTLEN) + bma.length = MAXEXTLEN; + else + bma.length = len; + + ASSERT(len > 0); + ASSERT(bma.length > 0); error = xfs_bmapi_allocate(&bma, flags); if (error) goto error0; diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index da108977b21f..558910f5e3c0 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c @@ -98,22 +98,22 @@ xfs_fs_encode_fh( switch (fileid_type) { case FILEID_INO32_GEN_PARENT: spin_lock(&dentry->d_lock); - fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino; + fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino; fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation; spin_unlock(&dentry->d_lock); /*FALLTHRU*/ case FILEID_INO32_GEN: - fid->i32.ino = inode->i_ino; + fid->i32.ino = XFS_I(inode)->i_ino; fid->i32.gen = inode->i_generation; break; case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: spin_lock(&dentry->d_lock); - fid64->parent_ino = dentry->d_parent->d_inode->i_ino; + fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino; fid64->parent_gen = dentry->d_parent->d_inode->i_generation; spin_unlock(&dentry->d_lock); /*FALLTHRU*/ case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG: - fid64->ino = inode->i_ino; + fid64->ino = XFS_I(inode)->i_ino; fid64->gen = inode->i_generation; break; } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index c0237c602f11..755ee8164880 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2835,6 +2835,27 @@ corrupt_out: return XFS_ERROR(EFSCORRUPTED); } +void +xfs_promote_inode( + struct xfs_inode *ip) +{ + struct xfs_buf *bp; + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); + + bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno, + ip->i_imap.im_len, XBF_TRYLOCK); + if (!bp) + return; + + if (XFS_BUF_ISDELAYWRITE(bp)) { + xfs_buf_delwri_promote(bp); + wake_up_process(ip->i_mount->m_ddev_targp->bt_task); + } + + xfs_buf_relse(bp); +} + /* * Return a pointer to the extent record at file index idx. 
*/ diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 760140d1dd66..b4cd4739f98e 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -498,6 +498,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); void xfs_iext_realloc(xfs_inode_t *, int, int); void xfs_iunpin_wait(xfs_inode_t *); int xfs_iflush(xfs_inode_t *, uint); +void xfs_promote_inode(struct xfs_inode *); void xfs_lock_inodes(xfs_inode_t **, int, uint); void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index a14cd89fe465..34817adf4b9e 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -150,6 +150,117 @@ xlog_grant_add_space( } while (head_val != old); } +STATIC bool +xlog_reserveq_wake( + struct log *log, + int *free_bytes) +{ + struct xlog_ticket *tic; + int need_bytes; + + list_for_each_entry(tic, &log->l_reserveq, t_queue) { + if (tic->t_flags & XLOG_TIC_PERM_RESERV) + need_bytes = tic->t_unit_res * tic->t_cnt; + else + need_bytes = tic->t_unit_res; + + if (*free_bytes < need_bytes) + return false; + *free_bytes -= need_bytes; + + trace_xfs_log_grant_wake_up(log, tic); + wake_up(&tic->t_wait); + } + + return true; +} + +STATIC bool +xlog_writeq_wake( + struct log *log, + int *free_bytes) +{ + struct xlog_ticket *tic; + int need_bytes; + + list_for_each_entry(tic, &log->l_writeq, t_queue) { + ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); + + need_bytes = tic->t_unit_res; + + if (*free_bytes < need_bytes) + return false; + *free_bytes -= need_bytes; + + trace_xfs_log_regrant_write_wake_up(log, tic); + wake_up(&tic->t_wait); + } + + return true; +} + +STATIC int +xlog_reserveq_wait( + struct log *log, + struct xlog_ticket *tic, + int need_bytes) +{ + list_add_tail(&tic->t_queue, &log->l_reserveq); + + do { + if (XLOG_FORCED_SHUTDOWN(log)) + goto shutdown; + xlog_grant_push_ail(log, need_bytes); + + XFS_STATS_INC(xs_sleep_logspace); + trace_xfs_log_grant_sleep(log, tic); + + xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock); + trace_xfs_log_grant_wake(log, tic); + + spin_lock(&log->l_grant_reserve_lock); + if (XLOG_FORCED_SHUTDOWN(log)) + goto shutdown; + } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes); + + list_del_init(&tic->t_queue); + return 0; +shutdown: + list_del_init(&tic->t_queue); + return XFS_ERROR(EIO); +} + +STATIC int +xlog_writeq_wait( + struct log *log, + struct xlog_ticket *tic, + int need_bytes) +{ + list_add_tail(&tic->t_queue, &log->l_writeq); + + do { + if (XLOG_FORCED_SHUTDOWN(log)) + goto shutdown; + xlog_grant_push_ail(log, need_bytes); + + XFS_STATS_INC(xs_sleep_logspace); + trace_xfs_log_regrant_write_sleep(log, tic); + + xlog_wait(&tic->t_wait, &log->l_grant_write_lock); + trace_xfs_log_regrant_write_wake(log, tic); + + spin_lock(&log->l_grant_write_lock); + if (XLOG_FORCED_SHUTDOWN(log)) + goto shutdown; + } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes); + + list_del_init(&tic->t_queue); + return 0; +shutdown: + list_del_init(&tic->t_queue); + return XFS_ERROR(EIO); +} + static void xlog_tic_reset_res(xlog_ticket_t *tic) { @@ -350,8 +461,19 @@ xfs_log_reserve( retval = xlog_grant_log_space(log, internal_ticket); } + if (unlikely(retval)) { + /* + * If we are failing, make sure the ticket doesn't have any + * current reservations. We don't want to add this back + * when the ticket/ transaction gets cancelled. + */ + internal_ticket->t_curr_res = 0; + /* ungrant will give back unit_res * t_cnt. 
*/ + internal_ticket->t_cnt = 0; + } + return retval; -} /* xfs_log_reserve */ +} /* @@ -2481,8 +2603,8 @@ restart: /* * Atomically get the log space required for a log ticket. * - * Once a ticket gets put onto the reserveq, it will only return after - * the needed reservation is satisfied. + * Once a ticket gets put onto the reserveq, it will only return after the + * needed reservation is satisfied. * * This function is structured so that it has a lock free fast path. This is * necessary because every new transaction reservation will come through this @@ -2490,113 +2612,53 @@ restart: * every pass. * * As tickets are only ever moved on and off the reserveq under the - * l_grant_reserve_lock, we only need to take that lock if we are going - * to add the ticket to the queue and sleep. We can avoid taking the lock if the - * ticket was never added to the reserveq because the t_queue list head will be - * empty and we hold the only reference to it so it can safely be checked - * unlocked. + * l_grant_reserve_lock, we only need to take that lock if we are going to add + * the ticket to the queue and sleep. We can avoid taking the lock if the ticket + * was never added to the reserveq because the t_queue list head will be empty + * and we hold the only reference to it so it can safely be checked unlocked. */ STATIC int -xlog_grant_log_space(xlog_t *log, - xlog_ticket_t *tic) +xlog_grant_log_space( + struct log *log, + struct xlog_ticket *tic) { - int free_bytes; - int need_bytes; + int free_bytes, need_bytes; + int error = 0; -#ifdef DEBUG - if (log->l_flags & XLOG_ACTIVE_RECOVERY) - panic("grant Recovery problem"); -#endif + ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); trace_xfs_log_grant_enter(log, tic); + /* + * If there are other waiters on the queue then give them a chance at + * logspace before us. Wake up the first waiters, if we do not wake + * up all the waiters then go to sleep waiting for more free space, + * otherwise try to get some space for this transaction. + */ need_bytes = tic->t_unit_res; if (tic->t_flags & XFS_LOG_PERM_RESERV) need_bytes *= tic->t_ocnt; - - /* something is already sleeping; insert new transaction at end */ - if (!list_empty_careful(&log->l_reserveq)) { - spin_lock(&log->l_grant_reserve_lock); - /* recheck the queue now we are locked */ - if (list_empty(&log->l_reserveq)) { - spin_unlock(&log->l_grant_reserve_lock); - goto redo; - } - list_add_tail(&tic->t_queue, &log->l_reserveq); - - trace_xfs_log_grant_sleep1(log, tic); - - /* - * Gotta check this before going to sleep, while we're - * holding the grant lock. - */ - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return; - - XFS_STATS_INC(xs_sleep_logspace); - xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock); - - /* - * If we got an error, and the filesystem is shutting down, - * we'll catch it down below. So just continue... 
- */ - trace_xfs_log_grant_wake1(log, tic); - } - -redo: - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return_unlocked; - free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); - if (free_bytes < need_bytes) { + if (!list_empty_careful(&log->l_reserveq)) { spin_lock(&log->l_grant_reserve_lock); - if (list_empty(&tic->t_queue)) - list_add_tail(&tic->t_queue, &log->l_reserveq); - - trace_xfs_log_grant_sleep2(log, tic); - - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return; - - xlog_grant_push_ail(log, need_bytes); - - XFS_STATS_INC(xs_sleep_logspace); - xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock); - - trace_xfs_log_grant_wake2(log, tic); - goto redo; - } - - if (!list_empty(&tic->t_queue)) { + if (!xlog_reserveq_wake(log, &free_bytes) || + free_bytes < need_bytes) + error = xlog_reserveq_wait(log, tic, need_bytes); + spin_unlock(&log->l_grant_reserve_lock); + } else if (free_bytes < need_bytes) { spin_lock(&log->l_grant_reserve_lock); - list_del_init(&tic->t_queue); + error = xlog_reserveq_wait(log, tic, need_bytes); spin_unlock(&log->l_grant_reserve_lock); } + if (error) + return error; - /* we've got enough space */ xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes); xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); trace_xfs_log_grant_exit(log, tic); xlog_verify_grant_tail(log); return 0; - -error_return_unlocked: - spin_lock(&log->l_grant_reserve_lock); -error_return: - list_del_init(&tic->t_queue); - spin_unlock(&log->l_grant_reserve_lock); - trace_xfs_log_grant_error(log, tic); - - /* - * If we are failing, make sure the ticket doesn't have any - * current reservations. We don't want to add this back when - * the ticket/transaction gets cancelled. - */ - tic->t_curr_res = 0; - tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ - return XFS_ERROR(EIO); -} /* xlog_grant_log_space */ - +} /* * Replenish the byte reservation required by moving the grant write head. @@ -2605,10 +2667,12 @@ error_return: * free fast path. */ STATIC int -xlog_regrant_write_log_space(xlog_t *log, - xlog_ticket_t *tic) +xlog_regrant_write_log_space( + struct log *log, + struct xlog_ticket *tic) { - int free_bytes, need_bytes; + int free_bytes, need_bytes; + int error = 0; tic->t_curr_res = tic->t_unit_res; xlog_tic_reset_res(tic); @@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t *log, if (tic->t_cnt > 0) return 0; -#ifdef DEBUG - if (log->l_flags & XLOG_ACTIVE_RECOVERY) - panic("regrant Recovery problem"); -#endif + ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); trace_xfs_log_regrant_write_enter(log, tic); - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return_unlocked; - /* If there are other waiters on the queue then give them a - * chance at logspace before us. Wake up the first waiters, - * if we do not wake up all the waiters then go to sleep waiting - * for more free space, otherwise try to get some space for - * this transaction. + /* + * If there are other waiters on the queue then give them a chance at + * logspace before us. Wake up the first waiters, if we do not wake + * up all the waiters then go to sleep waiting for more free space, + * otherwise try to get some space for this transaction. 
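The comment above, together with the xlog_reserveq_wake/xlog_writeq_wake helpers added earlier in this xfs_log.c diff, describes the new grant scheme: walk the waiting tickets in order, hand each one its reservation out of free_bytes and wake it, and stop at the first ticket that no longer fits; the caller then queues itself and sleeps if the queue could not be fully drained or its own request does not fit. A stand-alone sketch of that accounting walk over a plain array, ignoring permanent-reservation multipliers, locking and the real sleep/wake machinery:

#include <stdbool.h>
#include <stdio.h>

struct ticket {
    int need_bytes;
    bool woken;
};

/*
 * Grant space to queued waiters in FIFO order; stop at the first one that
 * does not fit so later waiters cannot jump ahead of it. Returns true only
 * if every waiter could be satisfied.
 */
static bool wake_waiters(struct ticket *q, int n, int *free_bytes)
{
    for (int i = 0; i < n; i++) {
        if (*free_bytes < q[i].need_bytes)
            return false;
        *free_bytes -= q[i].need_bytes;
        q[i].woken = true;
    }
    return true;
}

int main(void)
{
    struct ticket q[] = { { 100, false }, { 300, false }, { 200, false } };
    int free_bytes = 350;
    bool all = wake_waiters(q, 3, &free_bytes);

    printf("all woken: %d, free left: %d\n", all, free_bytes);
    for (int i = 0; i < 3; i++)
        printf("ticket %d woken: %d\n", i, q[i].woken);
    /* A caller that could not drain the queue (or fit itself) would now queue and sleep. */
    return 0;
}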
*/ need_bytes = tic->t_unit_res; - if (!list_empty_careful(&log->l_writeq)) { - struct xlog_ticket *ntic; - - spin_lock(&log->l_grant_write_lock); - free_bytes = xlog_space_left(log, &log->l_grant_write_head); - list_for_each_entry(ntic, &log->l_writeq, t_queue) { - ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV); - - if (free_bytes < ntic->t_unit_res) - break; - free_bytes -= ntic->t_unit_res; - wake_up(&ntic->t_wait); - } - - if (ntic != list_first_entry(&log->l_writeq, - struct xlog_ticket, t_queue)) { - if (list_empty(&tic->t_queue)) - list_add_tail(&tic->t_queue, &log->l_writeq); - trace_xfs_log_regrant_write_sleep1(log, tic); - - xlog_grant_push_ail(log, need_bytes); - - XFS_STATS_INC(xs_sleep_logspace); - xlog_wait(&tic->t_wait, &log->l_grant_write_lock); - trace_xfs_log_regrant_write_wake1(log, tic); - } else - spin_unlock(&log->l_grant_write_lock); - } - -redo: - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return_unlocked; - free_bytes = xlog_space_left(log, &log->l_grant_write_head); - if (free_bytes < need_bytes) { + if (!list_empty_careful(&log->l_writeq)) { spin_lock(&log->l_grant_write_lock); - if (list_empty(&tic->t_queue)) - list_add_tail(&tic->t_queue, &log->l_writeq); - - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return; - - xlog_grant_push_ail(log, need_bytes); - - XFS_STATS_INC(xs_sleep_logspace); - trace_xfs_log_regrant_write_sleep2(log, tic); - xlog_wait(&tic->t_wait, &log->l_grant_write_lock); - - trace_xfs_log_regrant_write_wake2(log, tic); - goto redo; - } - - if (!list_empty(&tic->t_queue)) { + if (!xlog_writeq_wake(log, &free_bytes) || + free_bytes < need_bytes) + error = xlog_writeq_wait(log, tic, need_bytes); + spin_unlock(&log->l_grant_write_lock); + } else if (free_bytes < need_bytes) { spin_lock(&log->l_grant_write_lock); - list_del_init(&tic->t_queue); + error = xlog_writeq_wait(log, tic, need_bytes); spin_unlock(&log->l_grant_write_lock); } - /* we've got enough space */ + if (error) + return error; + xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); trace_xfs_log_regrant_write_exit(log, tic); xlog_verify_grant_tail(log); return 0; - - - error_return_unlocked: - spin_lock(&log->l_grant_write_lock); - error_return: - list_del_init(&tic->t_queue); - spin_unlock(&log->l_grant_write_lock); - trace_xfs_log_regrant_write_error(log, tic); - - /* - * If we are failing, make sure the ticket doesn't have any - * current reservations. We don't want to add this back when - * the ticket/transaction gets cancelled. - */ - tic->t_curr_res = 0; - tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ - return XFS_ERROR(EIO); -} /* xlog_regrant_write_log_space */ - +} /* The first cnt-1 times through here we don't need to * move the grant write head because the permanent diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c index aa3dc1a4d53d..be5c51d8f757 100644 --- a/fs/xfs/xfs_sync.c +++ b/fs/xfs/xfs_sync.c @@ -770,6 +770,17 @@ restart: if (!xfs_iflock_nowait(ip)) { if (!(sync_mode & SYNC_WAIT)) goto out; + + /* + * If we only have a single dirty inode in a cluster there is + * a fair chance that the AIL push may have pushed it into + * the buffer, but xfsbufd won't touch it until 30 seconds + * from now, and thus we will lock up here. + * + * Promote the inode buffer to the front of the delwri list + * and wake up xfsbufd now. 
+ */ + xfs_promote_inode(ip); xfs_iflock(ip); } diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index f1d2802b2f07..494035798873 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write); DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter); DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit); DEFINE_LOGGRANT_EVENT(xfs_log_grant_error); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake); DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index f4c38d8c6674..2292d1af9d70 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs) __SYSCALL(__NR_setns, sys_setns) #define __NR_sendmmsg 269 __SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg) +#define __NR_process_vm_readv 270 +__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \ + compat_sys_process_vm_readv) +#define __NR_process_vm_writev 271 +__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \ + compat_sys_process_vm_writev) #undef __NR_syscalls -#define __NR_syscalls 270 +#define __NR_syscalls 272 /* * All syscalls below here should go away really, diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index f81676f1b310..4e4fbb820e20 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -197,6 +197,14 @@ {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ diff --git a/include/linux/compat.h 
b/include/linux/compat.h index 154bf5683015..66ed067fb729 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type, extern void __user *compat_alloc_user_space(unsigned long len); +asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, + const struct compat_iovec __user *lvec, + unsigned long liovcnt, const struct compat_iovec __user *rvec, + unsigned long riovcnt, unsigned long flags); +asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, + const struct compat_iovec __user *lvec, + unsigned long liovcnt, const struct compat_iovec __user *rvec, + unsigned long riovcnt, unsigned long flags); + #endif /* CONFIG_COMPAT */ #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 4df926199369..ed9f74f6c519 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *); */ extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); -extern char *__d_path(const struct path *path, struct path *root, char *, int); +extern char *__d_path(const struct path *, const struct path *, char *, int); +extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); extern char *d_path_with_unreachable(const struct path *, char *, int); extern char *dentry_path_raw(struct dentry *, char *, int); diff --git a/include/linux/fs.h b/include/linux/fs.h index e3130220ce3e..e0bc4ffb8e7f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -393,8 +393,8 @@ struct inodes_stat_t { #include <linux/semaphore.h> #include <linux/fiemap.h> #include <linux/rculist_bl.h> -#include <linux/shrinker.h> #include <linux/atomic.h> +#include <linux/shrinker.h> #include <asm/byteorder.h> @@ -1942,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *); extern int statfs_by_dentry(struct dentry *, struct kstatfs *); extern int freeze_super(struct super_block *super); extern int thaw_super(struct super_block *super); +extern bool our_mnt(struct vfsmount *mnt); extern int current_umask(void); diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 96efa6794ea5..c3da42dd22ba 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -172,6 +172,7 @@ enum { TRACE_EVENT_FL_FILTERED_BIT, TRACE_EVENT_FL_RECORDED_CMD_BIT, TRACE_EVENT_FL_CAP_ANY_BIT, + TRACE_EVENT_FL_NO_SET_FILTER_BIT, }; enum { @@ -179,6 +180,7 @@ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT), TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), + TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), }; struct ftrace_event_call { diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 94b1e356c02a..32574eef9394 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -126,6 +126,8 @@ extern struct cred init_cred; # define INIT_PERF_EVENTS(tsk) #endif +#define INIT_TASK_COMM "swapper" + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
Base=0, limit=0x1fffff (=2MB) @@ -162,7 +164,7 @@ extern struct cred init_cred; .group_leader = &tsk, \ RCU_INIT_POINTER(.real_cred, &init_cred), \ RCU_INIT_POINTER(.cred, &init_cred), \ - .comm = "swapper", \ + .comm = INIT_TASK_COMM, \ .thread = INIT_THREAD, \ .fs = &init_fs, \ .files = &init_files, \ diff --git a/include/linux/mm.h b/include/linux/mm.h index 3dc3a8c2c485..4baadd18f4ad 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -10,6 +10,7 @@ #include <linux/mmzone.h> #include <linux/rbtree.h> #include <linux/prio_tree.h> +#include <linux/atomic.h> #include <linux/debug_locks.h> #include <linux/mm_types.h> #include <linux/range.h> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cbeb5867cff7..a82ad4dd306a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2536,6 +2536,8 @@ extern void net_disable_timestamp(void); extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); extern void dev_seq_stop(struct seq_file *seq, void *v); +extern int dev_seq_open_ops(struct inode *inode, struct file *file, + const struct seq_operations *ops); #endif extern int netdev_class_create_file(struct class_attribute *class_attr); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 172ba70306d1..2aaee0ca9da8 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -517,8 +517,12 @@ #define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 +#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 +#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 +#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 #define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603 #define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604 +#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1e9ebe5e0091..b1f89122bf6a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -822,6 +822,7 @@ struct perf_event { int mmap_locked; struct user_struct *mmap_user; struct ring_buffer *rb; + struct list_head rb_entry; /* poll related */ wait_queue_head_t waitq; diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index c5336705921f..7281d5acf2f9 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -30,7 +30,7 @@ */ struct tc_stats { - __u64 bytes; /* NUmber of enqueues bytes */ + __u64 bytes; /* Number of enqueued bytes */ __u32 packets; /* Number of enqueued packets */ __u32 drops; /* Packets dropped because of lack of resources */ __u32 overlimits; /* Number of throttle events when this @@ -297,7 +297,7 @@ struct tc_htb_glob { __u32 debug; /* debug flags */ /* stats */ - __u32 direct_pkts; /* count of non shapped packets */ + __u32 direct_pkts; /* count of non shaped packets */ }; enum { TCA_HTB_UNSPEC, @@ -503,7 +503,7 @@ enum { }; #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1) -/* State transition probablities for 4 state model */ +/* State transition probabilities for 4 state model */ struct tc_netem_gimodel { __u32 p13; __u32 p31; diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index a83833a1f7a2..07ceb97d53fa 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -35,7 +35,7 @@ struct shrinker { /* These are for internal use */ struct list_head list; - long nr; /* objs pending 
delete */ + atomic_long_t nr_in_batch; /* objs pending delete */ }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ extern void register_shrinker(struct shrinker *); diff --git a/include/linux/sigma.h b/include/linux/sigma.h index e2accb3164d8..d0de882c0d96 100644 --- a/include/linux/sigma.h +++ b/include/linux/sigma.h @@ -24,7 +24,7 @@ struct sigma_firmware { struct sigma_firmware_header { unsigned char magic[7]; u8 version; - u32 crc; + __le32 crc; }; enum { @@ -40,19 +40,14 @@ enum { struct sigma_action { u8 instr; u8 len_hi; - u16 len; - u16 addr; + __le16 len; + __be16 addr; unsigned char payload[]; }; static inline u32 sigma_action_len(struct sigma_action *sa) { - return (sa->len_hi << 16) | sa->len; -} - -static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len) -{ - return sizeof(*sa) + payload_len + (payload_len % 2); + return (sa->len_hi << 16) | le16_to_cpu(sa->len); } extern int process_sigma_firmware(struct i2c_client *client, const char *name); diff --git a/include/net/dst.h b/include/net/dst.h index 4fb6c4381791..6faec1a60216 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -205,12 +205,7 @@ dst_feature(const struct dst_entry *dst, u32 feature) static inline u32 dst_mtu(const struct dst_entry *dst) { - u32 mtu = dst_metric_raw(dst, RTAX_MTU); - - if (!mtu) - mtu = dst->ops->default_mtu(dst); - - return mtu; + return dst->ops->mtu(dst); } /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */ diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 9adb99845a56..e1c2ee0eef47 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -17,7 +17,7 @@ struct dst_ops { int (*gc)(struct dst_ops *ops); struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); unsigned int (*default_advmss)(const struct dst_entry *); - unsigned int (*default_mtu)(const struct dst_entry *); + unsigned int (*mtu)(const struct dst_entry *); u32 * (*cow_metrics)(struct dst_entry *, unsigned long); void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index b897d6e6d0a5..f941964a9931 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -31,6 +31,7 @@ /** struct ip_options - IP Options * * @faddr - Saved first hop address + * @nexthop - Saved nexthop address in LSRR and SSRR * @is_data - Options in __data, rather than skb * @is_strictroute - Strict source route * @srr_is_hit - Packet destination addr was our one @@ -41,6 +42,7 @@ */ struct ip_options { __be32 faddr; + __be32 nexthop; unsigned char optlen; unsigned char srr; unsigned char rr; diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 78c83e62218f..e9ff3fc5e688 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -35,6 +35,7 @@ struct inet_peer { u32 metrics[RTAX_MAX]; u32 rate_tokens; /* rate limiting for ICMP */ + int redirect_genid; unsigned long rate_last; unsigned long pmtu_expires; u32 pmtu_orig; diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 4283508b3e18..a88fb6939387 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -67,18 +67,18 @@ struct nf_ct_event_notifier { int (*fcn)(unsigned int events, struct nf_ct_event *item); }; -extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; -extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb); -extern void 
nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb); +extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb); +extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb); extern void nf_ct_deliver_cached_events(struct nf_conn *ct); static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) { + struct net *net = nf_ct_net(ct); struct nf_conntrack_ecache *e; - if (nf_conntrack_event_cb == NULL) + if (net->ct.nf_conntrack_event_cb == NULL) return; e = nf_ct_ecache_find(ct); @@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask, int report) { int ret = 0; + struct net *net = nf_ct_net(ct); struct nf_ct_event_notifier *notify; struct nf_conntrack_ecache *e; rcu_read_lock(); - notify = rcu_dereference(nf_conntrack_event_cb); + notify = rcu_dereference(net->ct.nf_conntrack_event_cb); if (notify == NULL) goto out_unlock; @@ -164,9 +165,8 @@ struct nf_exp_event_notifier { int (*fcn)(unsigned int events, struct nf_exp_event *item); }; -extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb; -extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb); -extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb); +extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb); +extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb); static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, @@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event, u32 pid, int report) { + struct net *net = nf_ct_exp_net(exp); struct nf_exp_event_notifier *notify; struct nf_conntrack_ecache *e; rcu_read_lock(); - notify = rcu_dereference(nf_expect_event_cb); + notify = rcu_dereference(net->ct.nf_expect_event_cb); if (notify == NULL) goto out_unlock; diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 0249399e51a7..7a911eca0f18 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -18,6 +18,8 @@ struct netns_ct { struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; struct ip_conntrack_stat __percpu *stat; + struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb; + struct nf_exp_event_notifier __rcu *nf_expect_event_cb; int sysctl_events; unsigned int sysctl_events_retry_timeout; int sysctl_acct; diff --git a/include/net/red.h b/include/net/red.h index 3319f16b3beb..b72a3b833936 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -116,7 +116,7 @@ struct red_parms { u32 qR; /* Cached random number */ unsigned long qavg; /* Average queue length: A scaled */ - psched_time_t qidlestart; /* Start of current idle period */ + ktime_t qidlestart; /* Start of current idle period */ }; static inline u32 red_rmask(u8 Plog) @@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p, static inline int red_is_idling(struct red_parms *p) { - return p->qidlestart != PSCHED_PASTPERFECT; + return p->qidlestart.tv64 != 0; } static inline void red_start_of_idle_period(struct red_parms *p) { - p->qidlestart = psched_get_time(); + p->qidlestart = ktime_get(); } static inline void red_end_of_idle_period(struct red_parms *p) { - p->qidlestart = PSCHED_PASTPERFECT; + p->qidlestart.tv64 = 0; } static inline void red_restart(struct red_parms *p) @@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p) static inline unsigned long 
red_calc_qavg_from_idle_time(struct red_parms *p) { - psched_time_t now; - long us_idle; + s64 delta = ktime_us_delta(ktime_get(), p->qidlestart); + long us_idle = min_t(s64, delta, p->Scell_max); int shift; - now = psched_get_time(); - us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max); - /* * The problem: ideally, average length queue recalcultion should * be done over constant clock intervals. This is too expensive, so diff --git a/include/net/route.h b/include/net/route.h index db7b3432f07c..91855d185b53 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -71,12 +71,12 @@ struct rtable { struct fib_info *fi; /* for client ref to shared metrics */ }; -static inline bool rt_is_input_route(struct rtable *rt) +static inline bool rt_is_input_route(const struct rtable *rt) { return rt->rt_route_iif != 0; } -static inline bool rt_is_output_route(struct rtable *rt) +static inline bool rt_is_output_route(const struct rtable *rt) { return rt->rt_route_iif == 0; } diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 7f5fed3c89e1..6873c7dd9145 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -103,9 +103,10 @@ enum se_cmd_flags_table { SCF_SCSI_NON_DATA_CDB = 0x00000040, SCF_SCSI_CDB_EXCEPTION = 0x00000080, SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, - SCF_SE_CMD_FAILED = 0x00000400, + SCF_FUA = 0x00000200, SCF_SE_LUN_CMD = 0x00000800, SCF_SE_ALLOW_EOO = 0x00001000, + SCF_BIDI = 0x00002000, SCF_SENT_CHECK_CONDITION = 0x00004000, SCF_OVERFLOW_BIT = 0x00008000, SCF_UNDERFLOW_BIT = 0x00010000, @@ -154,6 +155,7 @@ enum tcm_sense_reason_table { TCM_CHECK_CONDITION_ABORT_CMD = 0x0d, TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, TCM_CHECK_CONDITION_NOT_READY = 0x0f, + TCM_RESERVATION_CONFLICT = 0x10, }; struct se_obj { @@ -211,7 +213,6 @@ struct t10_alua_lu_gp { u16 lu_gp_id; int lu_gp_valid_id; u32 lu_gp_members; - atomic_t lu_gp_shutdown; atomic_t lu_gp_ref_cnt; spinlock_t lu_gp_lock; struct config_group lu_gp_group; @@ -422,11 +423,9 @@ struct se_cmd { int sam_task_attr; /* Transport protocol dependent state, see transport_state_table */ enum transport_state_table t_state; - /* Transport specific error status */ - int transport_error_status; /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */ - int check_release:1; - int cmd_wait_set:1; + unsigned check_release:1; + unsigned cmd_wait_set:1; /* See se_cmd_flags_table */ u32 se_cmd_flags; u32 se_ordered_id; @@ -441,13 +440,10 @@ struct se_cmd { /* Used for sense data */ void *sense_buffer; struct list_head se_delayed_node; - struct list_head se_ordered_node; struct list_head se_lun_node; struct list_head se_qf_node; struct se_device *se_dev; struct se_dev_entry *se_deve; - struct se_device *se_obj_ptr; - struct se_device *se_orig_obj_ptr; struct se_lun *se_lun; /* Only used for internal passthrough and legacy TCM fabric modules */ struct se_session *se_sess; @@ -463,8 +459,6 @@ struct se_cmd { unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; unsigned long long t_task_lba; int t_tasks_failed; - int t_tasks_fua; - bool t_tasks_bidi; u32 t_tasks_sg_chained_no; atomic_t t_fe_count; atomic_t t_se_count; @@ -489,14 +483,6 @@ struct se_cmd { struct work_struct work; - /* - * Used for pre-registered fabric SGL passthrough WRITE and READ - * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop - * and other HW target mode fabric modules. 
- */ - struct scatterlist *t_task_pt_sgl; - u32 t_task_pt_sgl_num; - struct scatterlist *t_data_sg; unsigned int t_data_nents; struct scatterlist *t_bidi_data_sg; @@ -562,7 +548,7 @@ struct se_node_acl { } ____cacheline_aligned; struct se_session { - int sess_tearing_down:1; + unsigned sess_tearing_down:1; u64 sess_bin_isid; struct se_node_acl *se_node_acl; struct se_portal_group *se_tpg; @@ -683,7 +669,6 @@ struct se_subsystem_dev { struct t10_reservation t10_pr; spinlock_t se_dev_lock; void *se_dev_su_ptr; - struct list_head se_dev_node; struct config_group se_dev_group; /* For T10 Reservations */ struct config_group se_dev_pr_group; @@ -692,9 +677,6 @@ struct se_subsystem_dev { } ____cacheline_aligned; struct se_device { - /* Set to 1 if thread is NOT sleeping on thread_sem */ - u8 thread_active; - u8 dev_status_timer_flags; /* RELATIVE TARGET PORT IDENTIFER Counter */ u16 dev_rpti_counter; /* Used for SAM Task Attribute ordering */ @@ -719,14 +701,10 @@ struct se_device { u64 write_bytes; spinlock_t stats_lock; /* Active commands on this virtual SE device */ - atomic_t active_cmds; atomic_t simple_cmds; atomic_t depth_left; atomic_t dev_ordered_id; - atomic_t dev_tur_active; atomic_t execute_tasks; - atomic_t dev_status_thr_count; - atomic_t dev_hoq_count; atomic_t dev_ordered_sync; atomic_t dev_qf_count; struct se_obj dev_obj; @@ -734,14 +712,9 @@ struct se_device { struct se_obj dev_export_obj; struct se_queue_obj dev_queue_obj; spinlock_t delayed_cmd_lock; - spinlock_t ordered_cmd_lock; spinlock_t execute_task_lock; - spinlock_t state_task_lock; - spinlock_t dev_alua_lock; spinlock_t dev_reservation_lock; - spinlock_t dev_state_lock; spinlock_t dev_status_lock; - spinlock_t dev_status_thr_lock; spinlock_t se_port_lock; spinlock_t se_tmr_lock; spinlock_t qf_cmd_lock; @@ -753,14 +726,10 @@ struct se_device { struct t10_pr_registration *dev_pr_res_holder; struct list_head dev_sep_list; struct list_head dev_tmr_list; - struct timer_list dev_status_timer; /* Pointer to descriptor for processing thread */ struct task_struct *process_thread; - pid_t process_thread_pid; - struct task_struct *dev_mgmt_thread; struct work_struct qf_work_queue; struct list_head delayed_cmd_list; - struct list_head ordered_cmd_list; struct list_head execute_task_list; struct list_head state_task_list; struct list_head qf_cmd_list; @@ -771,8 +740,6 @@ struct se_device { struct se_subsystem_api *transport; /* Linked list for struct se_hba struct se_device list */ struct list_head dev_list; - /* Linked list for struct se_global->g_se_dev_list */ - struct list_head g_se_dev_list; } ____cacheline_aligned; struct se_hba { @@ -834,7 +801,6 @@ struct se_port { u32 sep_index; struct scsi_port_stats sep_stats; /* Used for ALUA Target Port Groups membership */ - atomic_t sep_tg_pt_gp_active; atomic_t sep_tg_pt_secondary_offline; /* Used for PR ALL_TG_PT=1 */ atomic_t sep_tg_pt_ref_cnt; diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index c16e9431dd01..dac4f2d859fd 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -10,29 +10,6 @@ #define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */ -#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0 -#define PYX_TRANSPORT_WRITE_PENDING 1 - -#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1 -#define PYX_TRANSPORT_HBA_QUEUE_FULL -2 -#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3 -#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4 -#define PYX_TRANSPORT_INVALID_CDB_FIELD -5 -#define 
PYX_TRANSPORT_INVALID_PARAMETER_LIST -6 -#define PYX_TRANSPORT_LU_COMM_FAILURE -7 -#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8 -#define PYX_TRANSPORT_WRITE_PROTECTED -9 -#define PYX_TRANSPORT_RESERVATION_CONFLICT -10 -#define PYX_TRANSPORT_ILLEGAL_REQUEST -11 -#define PYX_TRANSPORT_USE_SENSE_REASON -12 - -#ifndef SAM_STAT_RESERVATION_CONFLICT -#define SAM_STAT_RESERVATION_CONFLICT 0x18 -#endif - -#define TRANSPORT_PLUGIN_FREE 0 -#define TRANSPORT_PLUGIN_REGISTERED 1 - #define TRANSPORT_PLUGIN_PHBA_PDEV 1 #define TRANSPORT_PLUGIN_VHBA_PDEV 2 #define TRANSPORT_PLUGIN_VHBA_VDEV 3 @@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); extern int transport_handle_cdb_direct(struct se_cmd *); extern int transport_generic_handle_cdb_map(struct se_cmd *); extern int transport_generic_handle_data(struct se_cmd *); -extern void transport_new_cmd_failure(struct se_cmd *); extern int transport_generic_handle_tmr(struct se_cmd *); extern bool target_stop_task(struct se_task *task, unsigned long *flags); extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, diff --git a/kernel/events/core.c b/kernel/events/core.c index 0e8457da6f95..d3b9df5962c2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, static void update_context_time(struct perf_event_context *ctx); static u64 perf_event_time(struct perf_event *event); +static void ring_buffer_attach(struct perf_event *event, + struct ring_buffer *rb); + void __weak perf_event_print_debug(void) { } extern __weak const char *perf_pmu_name(void) @@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx, */ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); - perf_event_sched_in(cpuctx, ctx, task); + if (ctx->nr_events) + cpuctx->task_ctx = ctx; - cpuctx->task_ctx = ctx; + perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); perf_pmu_enable(ctx->pmu); perf_ctx_unlock(cpuctx, ctx); @@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) struct ring_buffer *rb; unsigned int events = POLL_HUP; + /* + * Race between perf_event_set_output() and perf_poll(): perf_poll() + * grabs the rb reference but perf_event_set_output() overrides it. 
+ * Here is the timeline for two threads T1, T2: + * t0: T1, rb = rcu_dereference(event->rb) + * t1: T2, old_rb = event->rb + * t2: T2, event->rb = new rb + * t3: T2, ring_buffer_detach(old_rb) + * t4: T1, ring_buffer_attach(rb1) + * t5: T1, poll_wait(event->waitq) + * + * To avoid this problem, we grab mmap_mutex in perf_poll() + * thereby ensuring that the assignment of the new ring buffer + * and the detachment of the old buffer appear atomic to perf_poll() + */ + mutex_lock(&event->mmap_mutex); + rcu_read_lock(); rb = rcu_dereference(event->rb); - if (rb) + if (rb) { + ring_buffer_attach(event, rb); events = atomic_xchg(&rb->poll, 0); + } rcu_read_unlock(); + mutex_unlock(&event->mmap_mutex); + poll_wait(file, &event->waitq, wait); return events; @@ -3496,6 +3521,49 @@ unlock: return ret; } +static void ring_buffer_attach(struct perf_event *event, + struct ring_buffer *rb) +{ + unsigned long flags; + + if (!list_empty(&event->rb_entry)) + return; + + spin_lock_irqsave(&rb->event_lock, flags); + if (!list_empty(&event->rb_entry)) + goto unlock; + + list_add(&event->rb_entry, &rb->event_list); +unlock: + spin_unlock_irqrestore(&rb->event_lock, flags); +} + +static void ring_buffer_detach(struct perf_event *event, + struct ring_buffer *rb) +{ + unsigned long flags; + + if (list_empty(&event->rb_entry)) + return; + + spin_lock_irqsave(&rb->event_lock, flags); + list_del_init(&event->rb_entry); + wake_up_all(&event->waitq); + spin_unlock_irqrestore(&rb->event_lock, flags); +} + +static void ring_buffer_wakeup(struct perf_event *event) +{ + struct ring_buffer *rb; + + rcu_read_lock(); + rb = rcu_dereference(event->rb); + list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { + wake_up_all(&event->waitq); + } + rcu_read_unlock(); +} + static void rb_free_rcu(struct rcu_head *rcu_head) { struct ring_buffer *rb; @@ -3521,9 +3589,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event) static void ring_buffer_put(struct ring_buffer *rb) { + struct perf_event *event, *n; + unsigned long flags; + if (!atomic_dec_and_test(&rb->refcount)) return; + spin_lock_irqsave(&rb->event_lock, flags); + list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) { + list_del_init(&event->rb_entry); + wake_up_all(&event->waitq); + } + spin_unlock_irqrestore(&rb->event_lock, flags); + call_rcu(&rb->rcu_head, rb_free_rcu); } @@ -3546,6 +3624,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); vma->vm_mm->pinned_vm -= event->mmap_locked; rcu_assign_pointer(event->rb, NULL); + ring_buffer_detach(event, rb); mutex_unlock(&event->mmap_mutex); ring_buffer_put(rb); @@ -3700,7 +3779,7 @@ static const struct file_operations perf_fops = { void perf_event_wakeup(struct perf_event *event) { - wake_up_all(&event->waitq); + ring_buffer_wakeup(event); if (event->pending_kill) { kill_fasync(&event->fasync, SIGIO, event->pending_kill); @@ -5822,6 +5901,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, INIT_LIST_HEAD(&event->group_entry); INIT_LIST_HEAD(&event->event_entry); INIT_LIST_HEAD(&event->sibling_list); + INIT_LIST_HEAD(&event->rb_entry); + init_waitqueue_head(&event->waitq); init_irq_work(&event->pending, perf_pending_event); @@ -6028,6 +6109,8 @@ set: old_rb = event->rb; rcu_assign_pointer(event->rb, rb); + if (old_rb) + ring_buffer_detach(event, old_rb); ret = 0; unlock: mutex_unlock(&event->mmap_mutex); diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 09097dd8116c..64568a699375 100644 --- 
a/kernel/events/internal.h +++ b/kernel/events/internal.h @@ -22,6 +22,9 @@ struct ring_buffer { local_t lost; /* nr records lost */ long watermark; /* wakeup watermark */ + /* poll crap */ + spinlock_t event_lock; + struct list_head event_list; struct perf_event_mmap_page *user_page; void *data_pages[0]; diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index a2a29205cc0f..7f3011c6b57f 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) rb->writable = 1; atomic_set(&rb->refcount, 1); + + INIT_LIST_HEAD(&rb->event_list); + spin_lock_init(&rb->event_lock); } #ifndef CONFIG_PERF_USE_VMALLOC diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0e2b179bc7b3..1da999f5e746 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) static int irq_wait_for_interrupt(struct irqaction *action) { + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { - set_current_state(TASK_INTERRUPTIBLE); if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) { @@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action) return 0; } schedule(); + set_current_state(TASK_INTERRUPTIBLE); } + __set_current_state(TASK_RUNNING); return -1; } diff --git a/kernel/jump_label.c b/kernel/jump_label.c index bbdfe2a462a0..66ff7109f697 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key) return; jump_label_lock(); - if (atomic_add_return(1, &key->enabled) == 1) + if (atomic_read(&key->enabled) == 0) jump_label_update(key, JUMP_LABEL_ENABLE); + atomic_inc(&key->enabled); jump_label_unlock(); } diff --git a/kernel/lockdep.c b/kernel/lockdep.c index e69434b070da..b2e08c932d91 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -44,6 +44,7 @@ #include <linux/stringify.h> #include <linux/bitops.h> #include <linux/gfp.h> +#include <linux/kmemcheck.h> #include <asm/sections.h> @@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, void lockdep_init_map(struct lockdep_map *lock, const char *name, struct lock_class_key *key, int subclass) { - memset(lock, 0, sizeof(*lock)); + int i; + + kmemcheck_mark_initialized(lock, sizeof(*lock)); + + for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) + lock->class_cache[i] = NULL; #ifdef CONFIG_LOCK_STAT lock->cpu = raw_smp_processor_id(); diff --git a/kernel/printk.c b/kernel/printk.c index 1455a0d4eedd..7982a0a841ea 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1293,10 +1293,11 @@ again: raw_spin_lock(&logbuf_lock); if (con_start != log_end) retry = 1; + raw_spin_unlock_irqrestore(&logbuf_lock, flags); + if (retry && console_trylock()) goto again; - raw_spin_unlock_irqrestore(&logbuf_lock, flags); if (wake_klogd) wake_up_klogd(); } diff --git a/kernel/sched.c b/kernel/sched.c index 0e9344a71be3..d6b149ccf925 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -71,6 +71,7 @@ #include <linux/ctype.h> #include <linux/ftrace.h> #include <linux/slab.h> +#include <linux/init_task.h> #include <asm/tlb.h> #include <asm/irq_regs.h> @@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion); * This waits for either a completion of a specific task to be signaled or for a * specified timeout to expire. The timeout is in jiffies. It is not * interruptible. 
+ * + * The return value is 0 if timed out, and positive (at least 1, or number of + * jiffies left till timeout) if completed. */ unsigned long __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) @@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout); * * This waits for completion of a specific task to be signaled. It is * interruptible. + * + * The return value is -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_interruptible(struct completion *x) { @@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible); * * This waits for either a completion of a specific task to be signaled or for a * specified timeout to expire. It is interruptible. The timeout is in jiffies. + * + * The return value is -ERESTARTSYS if interrupted, 0 if timed out, + * positive (at least 1, or number of jiffies left till timeout) if completed. */ long __sched wait_for_completion_interruptible_timeout(struct completion *x, @@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); * * This waits to be signaled for completion of a specific task. It can be * interrupted by a kill signal. + * + * The return value is -ERESTARTSYS if interrupted, 0 if completed. */ int __sched wait_for_completion_killable(struct completion *x) { @@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable); * This waits for either a completion of a specific task to be * signaled or for a specified timeout to expire. It can be * interrupted by a kill signal. The timeout is in jiffies. + * + * The return value is -ERESTARTSYS if interrupted, 0 if timed out, + * positive (at least 1, or number of jiffies left till timeout) if completed. */ long __sched wait_for_completion_killable_timeout(struct completion *x, @@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) */ idle->sched_class = &idle_sched_class; ftrace_graph_init_idle_task(idle, cpu); +#if defined(CONFIG_SMP) + sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); +#endif } /* diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 5c9e67923b7c..a78ed2736ba7 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) list_del_leaf_cfs_rq(cfs_rq); } +static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) +{ + long tg_weight; + + /* + * Use this CPU's actual weight instead of the last load_contribution + * to gain a more accurate current total weight. See + * update_cfs_rq_load_contribution(). 
+ */ + tg_weight = atomic_read(&tg->load_weight); + tg_weight -= cfs_rq->load_contribution; + tg_weight += cfs_rq->load.weight; + + return tg_weight; +} + static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) { - long load_weight, load, shares; + long tg_weight, load, shares; + tg_weight = calc_tg_weight(tg, cfs_rq); load = cfs_rq->load.weight; - load_weight = atomic_read(&tg->load_weight); - load_weight += load; - load_weight -= cfs_rq->load_contribution; - shares = (tg->shares * load); - if (load_weight) - shares /= load_weight; + if (tg_weight) + shares /= tg_weight; if (shares < MIN_SHARES) shares = MIN_SHARES; @@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) { - if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running) + if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) return; __return_cfs_rq_runtime(cfs_rq); @@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p) * Adding load to a group doesn't make a group heavier, but can cause movement * of group shares between cpus. Assuming the shares were perfectly aligned one * can calculate the shift in shares. + * + * Calculate the effective load difference if @wl is added (subtracted) to @tg + * on this @cpu and results in a total addition (subtraction) of @wg to the + * total group weight. + * + * Given a runqueue weight distribution (rw_i) we can compute a shares + * distribution (s_i) using: + * + * s_i = rw_i / \Sum rw_j (1) + * + * Suppose we have 4 CPUs and our @tg is a direct child of the root group and + * has 7 equal weight tasks, distributed as below (rw_i), with the resulting + * shares distribution (s_i): + * + * rw_i = { 2, 4, 1, 0 } + * s_i = { 2/7, 4/7, 1/7, 0 } + * + * As per wake_affine() we're interested in the load of two CPUs (the CPU the + * task used to run on and the CPU the waker is running on), we need to + * compute the effect of waking a task on either CPU and, in case of a sync + * wakeup, compute the effect of the current task going to sleep. + * + * So for a change of @wl to the local @cpu with an overall group weight change + * of @wl we can compute the new shares distribution (s'_i) using: + * + * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2) + * + * Suppose we're interested in CPUs 0 and 1, and want to compute the load + * differences in waking a task to CPU 0. The additional task changes the + * weight and shares distributions like: + * + * rw'_i = { 3, 4, 1, 0 } + * s'_i = { 3/8, 4/8, 1/8, 0 } + * + * We can then compute the difference in effective weight by using: + * + * dw_i = S * (s'_i - s_i) (3) + * + * Where 'S' is the group weight as seen by its parent. + * + * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7) + * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 - + * 4/7) times the weight of the group. 
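The comment block above ends with a worked example; as a quick arithmetic check (illustrative only, no kernel APIs involved), a few lines of C reproduce the quoted 5/56 and -4/56 share shifts:

/* Reproduce the numbers in the effective_load() example above. */
#include <stdio.h>

int main(void)
{
	double rw[4]  = { 2, 4, 1, 0 };	/* rw_i:  weights before the wakeup */
	double rwp[4] = { 3, 4, 1, 0 };	/* rw'_i: after adding @wl = 1 on CPU 0 */
	double sum = 2 + 4 + 1 + 0;	/* \Sum rw_j = 7 */
	double sump = sum + 1;		/* @wg + \Sum rw_j = 8 */

	for (int i = 0; i < 2; i++) {
		double ds = rwp[i] / sump - rw[i] / sum;	/* s'_i - s_i */

		printf("cpu%d: s'_i - s_i = %+.6f  (%+.0f/56)\n",
		       i, ds, ds * 56);
	}
	return 0;	/* prints +0.089286 (+5/56) and -0.071429 (-4/56) */
}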
*/ static long effective_load(struct task_group *tg, int cpu, long wl, long wg) { struct sched_entity *se = tg->se[cpu]; - if (!tg->parent) + if (!tg->parent) /* the trivial, non-cgroup case */ return wl; for_each_sched_entity(se) { - long lw, w; + long w, W; tg = se->my_q->tg; - w = se->my_q->load.weight; - /* use this cpu's instantaneous contribution */ - lw = atomic_read(&tg->load_weight); - lw -= se->my_q->load_contribution; - lw += w + wg; + /* + * W = @wg + \Sum rw_j + */ + W = wg + calc_tg_weight(tg, se->my_q); - wl += w; + /* + * w = rw_i + @wl + */ + w = se->my_q->load.weight + wl; - if (lw > 0 && wl < lw) - wl = (wl * tg->shares) / lw; + /* + * wl = S * s'_i; see (2) + */ + if (W > 0 && w < W) + wl = (w * tg->shares) / W; else wl = tg->shares; - /* zero point is MIN_SHARES */ + /* + * Per the above, wl is the new se->load.weight value; since + * those are clipped to [MIN_SHARES, ...) do so now. See + * calc_cfs_shares(). + */ if (wl < MIN_SHARES) wl = MIN_SHARES; + + /* + * wl = dw_i = S * (s'_i - s_i); see (3) + */ wl -= se->load.weight; + + /* + * Recursively apply this logic to all parent groups to compute + * the final effective load change on the root group. Since + * only the @tg group gets extra weight, all parent groups can + * only redistribute existing shares. @wl is the shift in shares + * resulting from this level per the above. + */ wg = 0; } @@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target) int cpu = smp_processor_id(); int prev_cpu = task_cpu(p); struct sched_domain *sd; - int i; + struct sched_group *sg; + int i, smt = 0; /* * If the task is going to be woken-up on this cpu and if it is @@ -2269,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target) * Otherwise, iterate the domains and find an elegible idle cpu. */ rcu_read_lock(); +again: for_each_domain(target, sd) { - if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) - break; + if (!smt && (sd->flags & SD_SHARE_CPUPOWER)) + continue; - for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) { - if (idle_cpu(i)) { - target = i; - break; + if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) { + if (!smt) { + smt = 1; + goto again; } + break; } - /* - * Lets stop looking for an idle sibling when we reached - * the domain that spans the current cpu and prev_cpu. - */ - if (cpumask_test_cpu(cpu, sched_domain_span(sd)) && - cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) - break; + sg = sd->groups; + do { + if (!cpumask_intersects(sched_group_cpus(sg), + tsk_cpus_allowed(p))) + goto next; + + for_each_cpu(i, sched_group_cpus(sg)) { + if (!idle_cpu(i)) + goto next; + } + + target = cpumask_first_and(sched_group_cpus(sg), + tsk_cpus_allowed(p)); + goto done; +next: + sg = sg->next; + } while (sg != sd->groups); } +done: rcu_read_unlock(); return target; @@ -3511,7 +3602,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd, } /** - * update_sd_lb_stats - Update sched_group's statistics for load balancing. + * update_sd_lb_stats - Update sched_domain's statistics for load balancing. * @sd: sched_domain whose statistics are to be updated. * @this_cpu: Cpu for which load balance is currently performed. 
* @idle: Idle status of this_cpu diff --git a/kernel/sched_features.h b/kernel/sched_features.h index efa0a7b75dde..84802245abd2 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h @@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1) SCHED_FEAT(TTWU_QUEUE, 1) SCHED_FEAT(FORCE_SD_OVERLAP, 0) +SCHED_FEAT(RT_RUNTIME_SHARE, 1) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 056cbd2e2a27..583a1368afe6 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq) { int more = 0; + if (!sched_feat(RT_RUNTIME_SHARE)) + return more; + if (rt_rq->rt_time > rt_rq->rt_runtime) { raw_spin_unlock(&rt_rq->rt_runtime_lock); more = do_balance_runtime(rt_rq); diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index c436e790b21b..8a46f5d64504 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer) struct alarm *alarm; ktime_t expired = next->expires; - if (expired.tv64 >= now.tv64) + if (expired.tv64 > now.tv64) break; alarm = container_of(next, struct alarm, node); diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 1ecd6ba36d6c..c4eb71c8b2ea 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -387,6 +387,7 @@ void clockevents_exchange_device(struct clock_event_device *old, * released list and do a notify add later. */ if (old) { + old->event_handler = clockevents_handle_noop; clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); list_del(&old->list); list_add(&old->list, &clockevents_released); diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index cfc65e1eb9fb..da2f760e780c 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -548,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs) * note a margin of 12.5% is used because this can be computed with * a shift, versus say 10% which would require division. */ - return max_nsecs - (max_nsecs >> 5); + return max_nsecs - (max_nsecs >> 3); } #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET @@ -669,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) * ~ 0.06ppm granularity for NTP. 
We apply the same 12.5% * margin as we do in clocksource_max_deferment() */ - sec = (cs->mask - (cs->mask >> 5)); + sec = (cs->mask - (cs->mask >> 3)); do_div(sec, freq); do_div(sec, scale); if (!sec) diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index f954282d9a82..fd4a7b1625a2 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev) (dev->features & CLOCK_EVT_FEAT_C3STOP)) return 0; - clockevents_exchange_device(NULL, dev); + clockevents_exchange_device(tick_broadcast_device.evtdev, dev); tick_broadcast_device.evtdev = dev; if (!cpumask_empty(tick_get_broadcast_mask())) tick_broadcast_start_periodic(dev); diff --git a/kernel/timer.c b/kernel/timer.c index dbaa62422b13..9c3c62b0c4bc 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid) int pid; rcu_read_lock(); - pid = task_tgid_vnr(current->real_parent); + pid = task_tgid_vnr(rcu_dereference(current->real_parent)); rcu_read_unlock(); return pid; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 900b409543db..b1e8943fed1d 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -152,7 +152,6 @@ void clear_ftrace_function(void) ftrace_pid_function = ftrace_stub; } -#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST /* * For those archs that do not test ftrace_trace_stop in their @@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, if (!src->count) { free_ftrace_hash_rcu(*dst); rcu_assign_pointer(*dst, EMPTY_HASH); - return 0; + /* still need to update the function records */ + ret = 0; + goto out; } /* diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 581876f9f387..c212a7f934ec 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events) /* First see if we did not already create this dir */ list_for_each_entry(system, &event_subsystems, list) { if (strcmp(system->name, name) == 0) { - __get_system(system); system->nr_events++; return system->entry; } diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 816d3d074979..95dc31efd6dd 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system, */ err = replace_preds(call, NULL, ps, filter_string, true); if (err) - goto fail; + call->flags |= TRACE_EVENT_FL_NO_SET_FILTER; + else + call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER; } list_for_each_entry(call, &ftrace_events, list) { @@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system, if (strcmp(call->class->system, system->name) != 0) continue; + if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER) + continue; + filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL); if (!filter_item) goto fail_mem; @@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system, * replace the filter for the call. 
*/ filter = call->filter; - call->filter = filter_item->filter; + rcu_assign_pointer(call->filter, filter_item->filter); filter_item->filter = filter; fail = false; @@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) filter = call->filter; if (!filter) goto out_unlock; - call->filter = NULL; + RCU_INIT_POINTER(call->filter, NULL); /* Make sure the filter is not being used */ synchronize_sched(); __free_filter(filter); @@ -1782,7 +1787,7 @@ out: * string */ tmp = call->filter; - call->filter = filter; + rcu_assign_pointer(call->filter, filter); if (tmp) { /* Make sure the call is done with the filter */ synchronize_sched(); diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 74c6c7fce749..fea790a2b176 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket, static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) { - return ((a->dev_addr == a->dev_addr) && + return ((a->dev_addr == b->dev_addr) && (a->dev == b->dev)) ? true : false; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4298abaae153..36b3d988b4ef 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage) static void khugepaged_alloc_sleep(void) { - DEFINE_WAIT(wait); - add_wait_queue(&khugepaged_wait, &wait); - schedule_timeout_interruptible( - msecs_to_jiffies( - khugepaged_alloc_sleep_millisecs)); - remove_wait_queue(&khugepaged_wait, &wait); + wait_event_freezable_timeout(khugepaged_wait, false, + msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); } #ifndef CONFIG_NUMA @@ -2313,14 +2309,10 @@ static void khugepaged_loop(void) if (unlikely(kthread_should_stop())) break; if (khugepaged_has_work()) { - DEFINE_WAIT(wait); if (!khugepaged_scan_sleep_millisecs) continue; - add_wait_queue(&khugepaged_wait, &wait); - schedule_timeout_interruptible( - msecs_to_jiffies( - khugepaged_scan_sleep_millisecs)); - remove_wait_queue(&khugepaged_wait, &wait); + wait_event_freezable_timeout(khugepaged_wait, false, + msecs_to_jiffies(khugepaged_scan_sleep_millisecs)); } else if (khugepaged_enabled()) wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bb28a5f9db8d..73f17c0293c0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) __SetPageHead(page); for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { __SetPageTail(p); + set_page_count(p, 0); p->first_page = page; } } diff --git a/mm/migrate.c b/mm/migrate.c index 578e29174fa6..177aca424a06 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, if (anon_vma) put_anon_vma(anon_vma); -out: unlock_page(hpage); +out: if (rc != -EAGAIN) { list_del(&hpage->lru); put_page(hpage); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9dd443d89d8b..2b8ba3aebf6e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order) __SetPageHead(page); for (i = 1; i < nr_pages; i++) { struct page *p = page + i; - __SetPageTail(p); + set_page_count(p, 0); p->first_page = page; } } @@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone) unsigned long block_migratetype; int reserve; - /* Get the start pfn, end pfn and the number of blocks to reserve */ + /* + * Get the start pfn, 
end pfn and the number of blocks to reserve + * We have to be careful to be aligned to pageblock_nr_pages to + * make sure that we always check pfn_valid for the first page in + * the block. + */ start_pfn = zone->zone_start_pfn; end_pfn = start_pfn + zone->spanned_pages; + start_pfn = roundup(start_pfn, pageblock_nr_pages); reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> pageblock_order; diff --git a/mm/slab.c b/mm/slab.c index 708efe886154..83311c9aaf9d 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -595,6 +595,7 @@ static enum { PARTIAL_AC, PARTIAL_L3, EARLY, + LATE, FULL } g_cpucache_up; @@ -671,7 +672,7 @@ static void init_node_lock_keys(int q) { struct cache_sizes *s = malloc_sizes; - if (g_cpucache_up != FULL) + if (g_cpucache_up < LATE) return; for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) { @@ -1666,6 +1667,8 @@ void __init kmem_cache_init_late(void) { struct kmem_cache *cachep; + g_cpucache_up = LATE; + /* Annotate slab for lockdep -- annotate the malloc caches */ init_lock_keys(); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 3231bf332878..1d8b32f07139 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, goto fail; addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); + if (!addr) + return NULL; /* * In this function, newly allocated vm_struct is not added diff --git a/mm/vmscan.c b/mm/vmscan.c index a1893c050795..f54a05b7a61d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone, */ void register_shrinker(struct shrinker *shrinker) { - shrinker->nr = 0; + atomic_long_set(&shrinker->nr_in_batch, 0); down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); up_write(&shrinker_rwsem); @@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink, list_for_each_entry(shrinker, &shrinker_list, list) { unsigned long long delta; - unsigned long total_scan; - unsigned long max_pass; + long total_scan; + long max_pass; int shrink_ret = 0; long nr; long new_nr; long batch_size = shrinker->batch ? shrinker->batch : SHRINK_BATCH; + max_pass = do_shrinker_shrink(shrinker, shrink, 0); + if (max_pass <= 0) + continue; + /* * copy the current shrinker scan count into a local variable * and zero it so that other concurrent shrinker invocations * don't also do this scanning work. */ - do { - nr = shrinker->nr; - } while (cmpxchg(&shrinker->nr, nr, 0) != nr); + nr = atomic_long_xchg(&shrinker->nr_in_batch, 0); total_scan = nr; - max_pass = do_shrinker_shrink(shrinker, shrink, 0); delta = (4 * nr_pages_scanned) / shrinker->seeks; delta *= max_pass; do_div(delta, lru_pages + 1); @@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink, * manner that handles concurrent updates. If we exhausted the * scan, there is no need to do an update. 
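This shrink_slab() hunk swaps the open-coded cmpxchg() retry loops on shrinker->nr for an atomic_long_t nr_in_batch driven by atomic_long_xchg() and atomic_long_add_return(). A minimal userspace C11 analogue of the two idioms, using <stdatomic.h> and invented function names rather than the kernel primitives, looks roughly like this:

/* Contrast the removed cmpxchg() retry loops with the new xchg/add_return style. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_in_batch = 1000;	/* deferred objects, as in the patch */

/* Old style: snapshot-and-zero with a compare-and-swap retry loop. */
static long take_batch_cmpxchg(void)
{
	long nr = atomic_load(&nr_in_batch);

	while (!atomic_compare_exchange_weak(&nr_in_batch, &nr, 0))
		;	/* nr is refreshed with the current value on failure */
	return nr;
}

/* New style: one unconditional exchange claims the whole batch. */
static long take_batch_xchg(void)
{
	return atomic_exchange(&nr_in_batch, 0);
}

int main(void)
{
	long nr = take_batch_xchg();
	long unscanned = nr - 100;	/* pretend we scanned 100 objects */

	/* atomic_long_add_return() equivalent: old value + delta. */
	long new_nr = atomic_fetch_add(&nr_in_batch, unscanned) + unscanned;

	printf("claimed %ld, returned %ld, counter now %ld\n", nr, unscanned, new_nr);
	(void)take_batch_cmpxchg;	/* kept only for comparison with the old code */
	return 0;
}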
*/ - do { - nr = shrinker->nr; - new_nr = total_scan + nr; - if (total_scan <= 0) - break; - } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr); + if (total_scan > 0) + new_nr = atomic_long_add_return(total_scan, + &shrinker->nr_in_batch); + else + new_nr = atomic_long_read(&shrinker->nr_in_batch); trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr); } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index e5f9ece3c9a0..a1daf8227ed1 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -18,6 +18,7 @@ #include <net/sock.h> #include "br_private.h" +#include "br_private_stp.h" static inline size_t br_nlmsg_size(void) { @@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) p->state = new_state; br_log_state(p); + + spin_lock_bh(&p->br->lock); + br_port_state_selection(p->br); + spin_unlock_bh(&p->br->lock); + br_ifinfo_notify(RTM_NEWLINK, p); return 0; diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index ad0a3f7cf6cc..dd147d78a588 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br) struct net_bridge_port *p; unsigned int liveports = 0; - /* Don't change port states if userspace is handling STP */ - if (br->stp_enabled == BR_USER_STP) - return; - list_for_each_entry(p, &br->port_list, list) { if (p->state == BR_STATE_DISABLED) continue; - if (p->port_no == br->root_port) { - p->config_pending = 0; - p->topology_change_ack = 0; - br_make_forwarding(p); - } else if (br_is_designated_port(p)) { - del_timer(&p->message_age_timer); - br_make_forwarding(p); - } else { - p->config_pending = 0; - p->topology_change_ack = 0; - br_make_blocking(p); + /* Don't change port states if userspace is handling STP */ + if (br->stp_enabled != BR_USER_STP) { + if (p->port_no == br->root_port) { + p->config_pending = 0; + p->topology_change_ack = 0; + br_make_forwarding(p); + } else if (br_is_designated_port(p)) { + del_timer(&p->message_age_timer); + br_make_forwarding(p); + } else { + p->config_pending = 0; + p->topology_change_ack = 0; + br_make_blocking(p); + } } if (p->state == BR_STATE_FORWARDING) diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c index f39921171d0d..d3ca87bf23b7 100644 --- a/net/caif/cffrml.c +++ b/net/caif/cffrml.c @@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt) static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt) { - int tmp; u16 chks; u16 len; + __le16 data; + struct cffrml *this = container_obj(layr); if (this->dofcs) { chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); - tmp = cpu_to_le16(chks); - cfpkt_add_trail(pkt, &tmp, 2); + data = cpu_to_le16(chks); + cfpkt_add_trail(pkt, &data, 2); } else { cfpkt_pad_trail(pkt, 2); } len = cfpkt_getlen(pkt); - tmp = cpu_to_le16(len); - cfpkt_add_head(pkt, &tmp, 2); + data = cpu_to_le16(len); + cfpkt_add_head(pkt, &data, 2); cfpkt_info(pkt)->hdr_len += 2; if (cfpkt_erroneous(pkt)) { pr_err("Packet is erroneous!\n"); diff --git a/net/core/dev.c b/net/core/dev.c index 6ba50a1e404c..5a13edfc9f73 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1396,7 +1396,7 @@ rollback: for_each_net(net) { for_each_netdev(net, dev) { if (dev == last) - break; + goto outroll; if (dev->flags & IFF_UP) { nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); @@ -1407,6 +1407,7 @@ rollback: } } +outroll: raw_notifier_chain_unregister(&netdev_chain, nb); goto unlock; } @@ -4282,6 +4283,12 @@ static int dev_seq_open(struct inode *inode, struct file 
*file) sizeof(struct dev_iter_state)); } +int dev_seq_open_ops(struct inode *inode, struct file *file, + const struct seq_operations *ops) +{ + return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state)); +} + static const struct file_operations dev_seq_fops = { .owner = THIS_MODULE, .open = dev_seq_open, diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 277faef9148d..febba516db62 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = { static int dev_mc_seq_open(struct inode *inode, struct file *file) { - return seq_open_net(inode, file, &dev_mc_seq_ops, - sizeof(struct seq_net_private)); + return dev_seq_open_ops(inode, file, &dev_mc_seq_ops); } static const struct file_operations dev_mc_seq_fops = { diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 039d51e6c284..5ac07d31fbc9 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2397,7 +2397,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq, struct net *net = seq_file_net(seq); struct neigh_table *tbl = state->tbl; - pn = pn->next; + do { + pn = pn->next; + } while (pn && !net_eq(pneigh_net(pn), net)); + while (!pn) { if (++state->bucket > PNEIGH_HASHMASK) break; diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 182236b2510a..9b570a6a33c5 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -26,10 +26,11 @@ * but then some measure against one socket starving all other sockets * would be needed. * - * It was 128 by default. Experiments with real servers show, that + * The minimum value of it is 128. Experiments with real servers show that * it is absolutely not enough even at 100conn/sec. 256 cures most - * of problems. This value is adjusted to 128 for very small machines - * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb). + * of problems. + * This value is adjusted to 128 for low memory machines, + * and it will increase in proportion to the memory of machine. * Note : Dont forget somaxconn that may limit backlog too. */ int sysctl_max_syn_backlog = 256; diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 025233de25f9..925991ae6f52 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -19,6 +19,7 @@ static int __init net_secret_init(void) } late_initcall(net_secret_init); +#ifdef CONFIG_INET static u32 seq_scale(u32 seq) { /* @@ -33,6 +34,7 @@ static u32 seq_scale(u32 seq) */ return seq + (ktime_to_ns(ktime_get_real()) >> 6); } +#endif #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 18a3cebb753d..3c30ee4a5710 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2230,7 +2230,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb) * @shiftlen: shift up to this many bytes * * Attempts to shift up to shiftlen worth of bytes, which may be less than - * the length of the skb, from tgt to skb. Returns number bytes shifted. + * the length of the skb, from skb to tgt. Returns number bytes shifted. * It's up to caller to free skb if everything was shifted. * * If @tgt runs out of frags, the whole operation is aborted. 
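Note (an illustrative aside, not part of the merge itself): the mm/vmscan.c hunk above replaces an open-coded cmpxchg() retry loop on shrinker->nr with atomic_long_xchg()/atomic_long_add_return() on the new nr_in_batch counter, so one caller atomically claims the whole deferred backlog and later re-defers whatever it could not scan. A minimal user-space sketch of that grab-and-zero pattern, using C11 atomics with purely illustrative names (claim_deferred/defer_remaining are not kernel APIs):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for shrinker->nr_in_batch in the patch above. */
static atomic_long nr_deferred;

/* Atomically claim all deferred work so concurrent callers cannot
 * scan the same backlog twice (mirrors atomic_long_xchg in the hunk). */
static long claim_deferred(void)
{
	return atomic_exchange(&nr_deferred, 0);
}

/* Hand unfinished work back and report the new total
 * (mirrors atomic_long_add_return in the hunk). */
static long defer_remaining(long leftover)
{
	return atomic_fetch_add(&nr_deferred, leftover) + leftover;
}

int main(void)
{
	atomic_store(&nr_deferred, 128);   /* pretend 128 objects were deferred earlier */
	long batch = claim_deferred();     /* this caller now owns the whole backlog */
	long scanned = 100;                /* suppose only 100 objects could be scanned */
	long pending = defer_remaining(batch - scanned);
	printf("claimed %ld, re-deferred %ld, now pending %ld\n",
	       batch, batch - scanned, pending);
	return 0;
}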
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 90a919afbed7..3f4e5414c8e5 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { + err = PTR_ERR(rt); rt = NULL; goto failure; } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index a77d16158eb6..94f4ec036669 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline; static int dn_dst_gc(struct dst_ops *ops); static struct dst_entry *dn_dst_check(struct dst_entry *, __u32); static unsigned int dn_dst_default_advmss(const struct dst_entry *dst); -static unsigned int dn_dst_default_mtu(const struct dst_entry *dst); +static unsigned int dn_dst_mtu(const struct dst_entry *dst); static void dn_dst_destroy(struct dst_entry *); static struct dst_entry *dn_dst_negative_advice(struct dst_entry *); static void dn_dst_link_failure(struct sk_buff *); @@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = { .gc = dn_dst_gc, .check = dn_dst_check, .default_advmss = dn_dst_default_advmss, - .default_mtu = dn_dst_default_mtu, + .mtu = dn_dst_mtu, .cow_metrics = dst_cow_metrics_generic, .destroy = dn_dst_destroy, .negative_advice = dn_dst_negative_advice, @@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst) return dn_mss_from_pmtu(dst->dev, dst_mtu(dst)); } -static unsigned int dn_dst_default_mtu(const struct dst_entry *dst) +static unsigned int dn_dst_mtu(const struct dst_entry *dst) { - return dst->dev->mtu; + unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); + + return mtu ? : dst->dev->mtu; } static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c index 67f691bd4acf..d9c150cc59a9 100644 --- a/net/decnet/dn_timer.c +++ b/net/decnet/dn_timer.c @@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg); void dn_start_slow_timer(struct sock *sk) { - sk->sk_timer.expires = jiffies + SLOW_INTERVAL; - sk->sk_timer.function = dn_slow_timer; - sk->sk_timer.data = (unsigned long)sk; - - add_timer(&sk->sk_timer); + setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk); + sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL); } void dn_stop_slow_timer(struct sock *sk) { - del_timer(&sk->sk_timer); + sk_stop_timer(sk, &sk->sk_timer); } static void dn_slow_timer(unsigned long arg) @@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg) struct sock *sk = (struct sock *)arg; struct dn_scp *scp = DN_SK(sk); - sock_hold(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { - sk->sk_timer.expires = jiffies + HZ / 10; - add_timer(&sk->sk_timer); + sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10); goto out; } @@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg) scp->keepalive_fxn(sk); } - sk->sk_timer.expires = jiffies + SLOW_INTERVAL; - - add_timer(&sk->sk_timer); + sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL); out: bh_unlock_sock(sk); sock_put(sk); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index c6b5092f29a1..65f01dc47565 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { + int old_value = *(int *)ctl->data; int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 
+ int new_value = *(int *)ctl->data; if (write) { struct ipv4_devconf *cnf = ctl->extra1; @@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write, if (cnf == net->ipv4.devconf_dflt) devinet_copy_dflt_conf(net, i); + if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1) + if ((new_value == 0) && (old_value != 0)) + rt_cache_flush(net, 0); } return ret; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index c7472eff2d51..b2ca095cb9da 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1716,7 +1716,8 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, if (err) { int j; - pmc->sfcount[sfmode]--; + if (!delta) + pmc->sfcount[sfmode]--; for (j=0; j<i; j++) (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]); } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) { diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 68e8ac514383..ccee270a9b65 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -108,9 +108,6 @@ static int inet_csk_diag_fill(struct sock *sk, icsk->icsk_ca_ops->name); } - if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6)) - RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos); - r->idiag_family = sk->sk_family; r->idiag_state = sk->sk_state; r->idiag_timer = 0; @@ -125,16 +122,23 @@ static int inet_csk_diag_fill(struct sock *sk, r->id.idiag_src[0] = inet->inet_rcv_saddr; r->id.idiag_dst[0] = inet->inet_daddr; + /* IPv6 dual-stack sockets use inet->tos for IPv4 connections, + * hence this needs to be included regardless of socket family. + */ + if (ext & (1 << (INET_DIAG_TOS - 1))) + RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos); + #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) if (r->idiag_family == AF_INET6) { const struct ipv6_pinfo *np = inet6_sk(sk); + if (ext & (1 << (INET_DIAG_TCLASS - 1))) + RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass); + ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, &np->rcv_saddr); ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, &np->daddr); - if (ext & (1 << (INET_DIAG_TCLASS - 1))) - RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass); } #endif diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 3b34d1c86270..29a07b6c7168 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb) rt = skb_rtable(skb); - if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway) + if (opt->is_strictroute && opt->nexthop != rt->rt_gateway) goto sr_failed; if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) && diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 05d20cca9d66..1e60f7679075 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb) ) { if (srrptr + 3 > srrspace) break; - if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0) + if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0) break; } if (srrptr + 3 <= srrspace) { opt->is_changed = 1; ip_rt_get_source(&optptr[srrptr-1], skb, rt); + ip_hdr(skb)->daddr = opt->nexthop; optptr[2] = srrptr+4; } else if (net_ratelimit()) printk(KERN_CRIT "ip_forward(): Argh! 
Destination lost!\n"); @@ -640,7 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) } if (srrptr <= srrspace) { opt->srr_is_hit = 1; - iph->daddr = nexthop; + opt->nexthop = nexthop; opt->is_changed = 1; } return 0; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 9899619ab9b8..4f47e064e262 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) /* Change in oif may mean change in hh_len. */ hh_len = skb_dst(skb)->dev->hard_header_len; if (skb_headroom(skb) < hh_len && - pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) + pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)), + 0, GFP_ATOMIC)) return -1; return 0; diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 1dfc18a03fd4..f19f2182894c 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -325,7 +325,6 @@ config IP_NF_TARGET_TTL # raw + specific targets config IP_NF_RAW tristate 'raw table support (required for NOTRACK/TRACE)' - depends on NETFILTER_ADVANCED help This option adds a `raw' table to iptables. This table is the very first in the netfilter framework and hooks in at the PREROUTING diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 0c74da8a0473..46af62363b8c 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -112,7 +112,7 @@ #include <net/secure_seq.h> #define RT_FL_TOS(oldflp4) \ - ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))) + ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)) #define IP_MAX_MTU 0xFFF0 @@ -131,6 +131,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; static int ip_rt_min_advmss __read_mostly = 256; static int rt_chain_length_max __read_mostly = 20; +static int redirect_genid; /* * Interface to generic destination cache. @@ -138,7 +139,7 @@ static int rt_chain_length_max __read_mostly = 20; static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); static unsigned int ipv4_default_advmss(const struct dst_entry *dst); -static unsigned int ipv4_default_mtu(const struct dst_entry *dst); +static unsigned int ipv4_mtu(const struct dst_entry *dst); static void ipv4_dst_destroy(struct dst_entry *dst); static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); static void ipv4_link_failure(struct sk_buff *skb); @@ -193,7 +194,7 @@ static struct dst_ops ipv4_dst_ops = { .gc = rt_garbage_collect, .check = ipv4_dst_check, .default_advmss = ipv4_default_advmss, - .default_mtu = ipv4_default_mtu, + .mtu = ipv4_mtu, .cow_metrics = ipv4_cow_metrics, .destroy = ipv4_dst_destroy, .ifdown = ipv4_dst_ifdown, @@ -416,9 +417,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) else { struct rtable *r = v; struct neighbour *n; - int len; + int len, HHUptod; + rcu_read_lock(); n = dst_get_neighbour(&r->dst); + HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0; + rcu_read_unlock(); + seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", r->dst.dev ? r->dst.dev->name : "*", @@ -432,7 +437,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) dst_metric(&r->dst, RTAX_RTTVAR)), r->rt_key_tos, -1, - (n && (n->nud_state & NUD_CONNECTED)) ? 
1 : 0, + HHUptod, r->rt_spec_dst, &len); seq_printf(seq, "%*s\n", 127 - len, ""); @@ -837,6 +842,7 @@ static void rt_cache_invalidate(struct net *net) get_random_bytes(&shuffle, sizeof(shuffle)); atomic_add(shuffle + 1U, &net->ipv4.rt_genid); + redirect_genid++; } /* @@ -1304,7 +1310,7 @@ static void rt_del(unsigned hash, struct rtable *rt) spin_unlock_bh(rt_hash_lock_addr(hash)); } -static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer) +static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer) { struct rtable *rt = (struct rtable *) dst; __be32 orig_gw = rt->rt_gateway; @@ -1315,21 +1321,19 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer) rt->rt_gateway = peer->redirect_learned.a4; n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway); - if (IS_ERR(n)) - return PTR_ERR(n); + if (IS_ERR(n)) { + rt->rt_gateway = orig_gw; + return; + } old_n = xchg(&rt->dst._neighbour, n); if (old_n) neigh_release(old_n); - if (!n || !(n->nud_state & NUD_VALID)) { - if (n) - neigh_event_send(n, NULL); - rt->rt_gateway = orig_gw; - return -EAGAIN; + if (!(n->nud_state & NUD_VALID)) { + neigh_event_send(n, NULL); } else { rt->rt_flags |= RTCF_REDIRECTED; call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n); } - return 0; } /* called in rcu_read_lock() section */ @@ -1391,8 +1395,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, peer = rt->peer; if (peer) { - if (peer->redirect_learned.a4 != new_gw) { + if (peer->redirect_learned.a4 != new_gw || + peer->redirect_genid != redirect_genid) { peer->redirect_learned.a4 = new_gw; + peer->redirect_genid = redirect_genid; atomic_inc(&__rt_peer_genid); } check_peer_redir(&rt->dst, peer); @@ -1685,12 +1691,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu) } -static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) +static void ipv4_validate_peer(struct rtable *rt) { - struct rtable *rt = (struct rtable *) dst; - - if (rt_is_expired(rt)) - return NULL; if (rt->rt_peer_genid != rt_peer_genid()) { struct inet_peer *peer; @@ -1699,17 +1701,26 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) peer = rt->peer; if (peer) { - check_peer_pmtu(dst, peer); + check_peer_pmtu(&rt->dst, peer); + if (peer->redirect_genid != redirect_genid) + peer->redirect_learned.a4 = 0; if (peer->redirect_learned.a4 && - peer->redirect_learned.a4 != rt->rt_gateway) { - if (check_peer_redir(dst, peer)) - return NULL; - } + peer->redirect_learned.a4 != rt->rt_gateway) + check_peer_redir(&rt->dst, peer); } rt->rt_peer_genid = rt_peer_genid(); } +} + +static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) +{ + struct rtable *rt = (struct rtable *) dst; + + if (rt_is_expired(rt)) + return NULL; + ipv4_validate_peer(rt); return dst; } @@ -1814,12 +1825,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst) return advmss; } -static unsigned int ipv4_default_mtu(const struct dst_entry *dst) +static unsigned int ipv4_mtu(const struct dst_entry *dst) { - unsigned int mtu = dst->dev->mtu; + const struct rtable *rt = (const struct rtable *) dst; + unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); + + if (mtu && rt_is_output_route(rt)) + return mtu; + + mtu = dst->dev->mtu; if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { - const struct rtable *rt = (const struct rtable *) dst; if (rt->rt_gateway != rt->rt_dst && mtu > 576) mtu = 576; @@ -1852,6 +1868,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4, 
dst_init_metrics(&rt->dst, peer->metrics, false); check_peer_pmtu(&rt->dst, peer); + if (peer->redirect_genid != redirect_genid) + peer->redirect_learned.a4 = 0; if (peer->redirect_learned.a4 && peer->redirect_learned.a4 != rt->rt_gateway) { rt->rt_gateway = peer->redirect_learned.a4; @@ -2357,6 +2375,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth->rt_mark == skb->mark && net_eq(dev_net(rth->dst.dev), net) && !rt_is_expired(rth)) { + ipv4_validate_peer(rth); if (noref) { dst_use_noref(&rth->dst, jiffies); skb_dst_set_noref(skb, &rth->dst); @@ -2415,11 +2434,11 @@ EXPORT_SYMBOL(ip_route_input_common); static struct rtable *__mkroute_output(const struct fib_result *res, const struct flowi4 *fl4, __be32 orig_daddr, __be32 orig_saddr, - int orig_oif, struct net_device *dev_out, + int orig_oif, __u8 orig_rtos, + struct net_device *dev_out, unsigned int flags) { struct fib_info *fi = res->fi; - u32 tos = RT_FL_TOS(fl4); struct in_device *in_dev; u16 type = res->type; struct rtable *rth; @@ -2470,7 +2489,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, rth->rt_genid = rt_genid(dev_net(dev_out)); rth->rt_flags = flags; rth->rt_type = type; - rth->rt_key_tos = tos; + rth->rt_key_tos = orig_rtos; rth->rt_dst = fl4->daddr; rth->rt_src = fl4->saddr; rth->rt_route_iif = 0; @@ -2520,7 +2539,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4) { struct net_device *dev_out = NULL; - u32 tos = RT_FL_TOS(fl4); + __u8 tos = RT_FL_TOS(fl4); unsigned int flags = 0; struct fib_result res; struct rtable *rth; @@ -2696,7 +2715,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4) make_route: rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif, - dev_out, flags); + tos, dev_out, flags); if (!IS_ERR(rth)) { unsigned int hash; @@ -2732,6 +2751,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4) (IPTOS_RT_MASK | RTO_ONLINK)) && net_eq(dev_net(rth->dst.dev), net) && !rt_is_expired(rth)) { + ipv4_validate_peer(rth); dst_use(&rth->dst, jiffies); RT_CACHE_STAT_INC(out_hit); rcu_read_unlock_bh(); @@ -2755,9 +2775,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo return NULL; } -static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst) +static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst) { - return 0; + unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); + + return mtu ? 
: dst->dev->mtu; } static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) @@ -2775,7 +2797,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { .protocol = cpu_to_be16(ETH_P_IP), .destroy = ipv4_dst_destroy, .check = ipv4_blackhole_dst_check, - .default_mtu = ipv4_blackhole_default_mtu, + .mtu = ipv4_blackhole_mtu, .default_advmss = ipv4_default_advmss, .update_pmtu = ipv4_rt_blackhole_update_pmtu, .cow_metrics = ipv4_rt_blackhole_cow_metrics, diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ab0966df1e2a..5a65eeac1d29 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; - unsigned int ulen; + unsigned int ulen, copied; int peeked; int err; int is_udplite = IS_UDPLITE(sk); @@ -1186,9 +1186,10 @@ try_again: goto out; ulen = skb->len - sizeof(struct udphdr); - if (len > ulen) - len = ulen; - else if (len < ulen) + copied = len; + if (copied > ulen) + copied = ulen; + else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* @@ -1197,14 +1198,14 @@ try_again: * coverage checksum (UDP-Lite), do it before the copy. */ - if (len < ulen || UDP_SKB_CB(skb)->partial_cov) { + if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), - msg->msg_iov, len); + msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), @@ -1233,7 +1234,7 @@ try_again: if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); - err = len; + err = copied; if (flags & MSG_TRUNC) err = ulen; diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index fee46d5a2f12..1567fb120392 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk, * request_sock (formerly open request) hash tables. */ static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, - const u32 rnd, const u16 synq_hsize) + const u32 rnd, const u32 synq_hsize) { u32 c; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index c99e3ee9781f..26cb08c84b74 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -503,7 +503,7 @@ done: goto e_inval; if (val > 255 || val < -1) goto e_inval; - np->mcast_hops = val; + np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val); retv = 0; break; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 44e5b7f2a6c1..0cb78d7ddaf5 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1571,7 +1571,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, } if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); - if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) + if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ)) goto release; if (dev->addr_len) { diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 448464844a25..f792b34cbe9c 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -186,7 +186,6 @@ config IP6_NF_MANGLE config IP6_NF_RAW tristate 'raw table support (required for TRACE)' - depends on NETFILTER_ADVANCED help This option adds a `raw' table to ip6tables. 
This table is the very first in the netfilter framework and hooks in at the PREROUTING diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8473016bba4a..3399dd326287 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -77,7 +77,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, const struct in6_addr *dest); static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); static unsigned int ip6_default_advmss(const struct dst_entry *dst); -static unsigned int ip6_default_mtu(const struct dst_entry *dst); +static unsigned int ip6_mtu(const struct dst_entry *dst); static struct dst_entry *ip6_negative_advice(struct dst_entry *); static void ip6_dst_destroy(struct dst_entry *); static void ip6_dst_ifdown(struct dst_entry *, @@ -144,7 +144,7 @@ static struct dst_ops ip6_dst_ops_template = { .gc_thresh = 1024, .check = ip6_dst_check, .default_advmss = ip6_default_advmss, - .default_mtu = ip6_default_mtu, + .mtu = ip6_mtu, .cow_metrics = ipv6_cow_metrics, .destroy = ip6_dst_destroy, .ifdown = ip6_dst_ifdown, @@ -155,9 +155,11 @@ static struct dst_ops ip6_dst_ops_template = { .neigh_lookup = ip6_neigh_lookup, }; -static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst) +static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) { - return 0; + unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); + + return mtu ? : dst->dev->mtu; } static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) @@ -175,7 +177,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { .protocol = cpu_to_be16(ETH_P_IPV6), .destroy = ip6_dst_destroy, .check = ip6_dst_check, - .default_mtu = ip6_blackhole_default_mtu, + .mtu = ip6_blackhole_mtu, .default_advmss = ip6_default_advmss, .update_pmtu = ip6_rt_blackhole_update_pmtu, .cow_metrics = ip6_rt_blackhole_cow_metrics, @@ -1041,10 +1043,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst) return mtu; } -static unsigned int ip6_default_mtu(const struct dst_entry *dst) +static unsigned int ip6_mtu(const struct dst_entry *dst) { - unsigned int mtu = IPV6_MIN_MTU; struct inet6_dev *idev; + unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); + + if (mtu) + return mtu; + + mtu = IPV6_MIN_MTU; rcu_read_lock(); idev = __in6_dev_get(dst->dev); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 36131d122a6f..2dea4bb7b54a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1255,6 +1255,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (!want_cookie || tmp_opt.tstamp_ok) TCP_ECN_create_request(req, tcp_hdr(skb)); + treq->iif = sk->sk_bound_dev_if; + + /* So that link locals have meaning */ + if (!sk->sk_bound_dev_if && + ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) + treq->iif = inet6_iif(skb); + if (!isn) { struct inet_peer *peer = NULL; @@ -1264,12 +1271,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) atomic_inc(&skb->users); treq->pktopts = skb; } - treq->iif = sk->sk_bound_dev_if; - - /* So that link locals have meaning */ - if (!sk->sk_bound_dev_if && - ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) - treq->iif = inet6_iif(skb); if (want_cookie) { isn = cookie_v6_init_sequence(sk, skb, &req->mss); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 846f4757eb8d..8c2541915183 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); struct sk_buff *skb; - unsigned 
int ulen; + unsigned int ulen, copied; int peeked; int err; int is_udplite = IS_UDPLITE(sk); @@ -363,9 +363,10 @@ try_again: goto out; ulen = skb->len - sizeof(struct udphdr); - if (len > ulen) - len = ulen; - else if (len < ulen) + copied = len; + if (copied > ulen) + copied = ulen; + else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; is_udp4 = (skb->protocol == htons(ETH_P_IP)); @@ -376,14 +377,14 @@ try_again: * coverage checksum (UDP-Lite), do it before the copy. */ - if (len < ulen || UDP_SKB_CB(skb)->partial_cov) { + if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), - msg->msg_iov,len); + msg->msg_iov, copied ); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) @@ -432,7 +433,7 @@ try_again: datagram_recv_ctl(sk, msg, skb); } - err = len; + err = copied; if (flags & MSG_TRUNC) err = ulen; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index cf0f308abf5e..89ff8c67943e 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len /* Get routing info from the tunnel socket */ skb_dst_drop(skb); - skb_dst_set(skb, dst_clone(__sk_dst_get(sk))); + skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); inet = inet_sk(sk); fl = &inet->cork.fl; diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index b3f65520e7a7..b064e4df12c6 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -161,6 +161,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, return -ENOENT; } + /* if we're already stopping ignore any new requests to stop */ + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { + spin_unlock_bh(&sta->lock); + return -EALREADY; + } + if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { /* not even started yet! */ ieee80211_assign_tid_tx(sta, tid, NULL); @@ -169,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, return 0; } + set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); + spin_unlock_bh(&sta->lock); #ifdef CONFIG_MAC80211_HT_DEBUG @@ -176,8 +184,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, sta->sta.addr, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ - set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); - del_timer_sync(&tid_tx->addba_resp_timer); /* @@ -187,6 +193,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, */ clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); + /* + * There might be a few packets being processed right now (on + * another CPU) that have already gotten past the aggregation + * check when it was still OPERATIONAL and consequently have + * IEEE80211_TX_CTL_AMPDU set. In that case, this code might + * call into the driver at the same time or even before the + * TX paths calls into it, which could confuse the driver. + * + * Wait for all currently running TX paths to finish before + * telling the driver. New packets will not go through since + * the aggregation session is no longer OPERATIONAL. 
+ */ + synchronize_net(); + tid_tx->stop_initiator = initiator; tid_tx->tx_stop = tx; @@ -757,11 +777,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, goto out; } - del_timer(&tid_tx->addba_resp_timer); + del_timer_sync(&tid_tx->addba_resp_timer); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); #endif + + /* + * addba_resp_timer may have fired before we got here, and + * caused WANT_STOP to be set. If the stop then was already + * processed further, STOPPING might be set. + */ + if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) || + test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { +#ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG + "got addBA resp for tid %d but we already gave up\n", + tid); +#endif + goto out; + } + /* * IEEE 802.11-2007 7.3.1.14: * In an ADDBA Response frame, when the Status Code field diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index c5f341798c16..3110cbdc501b 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -274,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); - PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " - "3839 bytes"); PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " + "3839 bytes"); + PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " "7935 bytes"); /* diff --git a/net/mac80211/main.c b/net/mac80211/main.c index d999bf3b84e1..cae443563ec9 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -757,6 +757,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (!local->int_scan_req) return -ENOMEM; + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!local->hw.wiphy->bands[band]) + continue; + local->int_scan_req->rates[band] = (u32) -1; + } + /* if low-level driver supports AP, we also support VLAN */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) { hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 80de436eae20..16518f386117 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -260,7 +260,7 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_radiotap_header *rthdr; unsigned char *pos; - __le16 txflags; + u16 txflags; rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len); @@ -290,13 +290,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band txflags = 0; if (!(info->flags & IEEE80211_TX_STAT_ACK) && !is_multicast_ether_addr(hdr->addr1)) - txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL); + txflags |= IEEE80211_RADIOTAP_F_TX_FAIL; if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) - txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS); + txflags |= IEEE80211_RADIOTAP_F_TX_CTS; else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) - txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS); + txflags |= IEEE80211_RADIOTAP_F_TX_RTS; put_unaligned_le16(txflags, pos); pos += 2; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index eca0fad09709..d5230ecc784d 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1039,7 +1039,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) struct ieee80211_sub_if_data, u.ap); - memset(&sta->sta.drv_priv, 
0, hw->sta_data_size); WARN_ON(drv_sta_add(local, sdata, &sta->sta)); } } diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 8260b13d93c9..d5597b759ba3 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST config NF_CONNTRACK_NETBIOS_NS tristate "NetBIOS name service protocol support" - depends on NETFILTER_ADVANCED select NF_CONNTRACK_BROADCAST help NetBIOS name service requests are sent as broadcast messages from an @@ -542,7 +541,6 @@ config NETFILTER_XT_TARGET_NOTRACK tristate '"NOTRACK" target support' depends on IP_NF_RAW || IP6_NF_RAW depends on NF_CONNTRACK - depends on NETFILTER_ADVANCED help The NOTRACK target allows a select rule to specify which packets *not* to enter the conntrack/NAT diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 6ee10f5d59bd..37d667e3f6f8 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipport4_elem data = { }; - u32 ip, ip_to, p = 0, port, port_to; + u32 ip, ip_to = 0, p = 0, port, port_to; u32 timeout = h->timeout; bool with_ports = false; int ret; diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index fb90e344e907..e69e2718fbe1 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportip4_elem data = { }; - u32 ip, ip_to, p = 0, port, port_to; + u32 ip, ip_to = 0, p = 0, port, port_to; u32 timeout = h->timeout; bool with_ports = false; int ret; diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index deb3e3dfa5fc..64199b4e93c9 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; - u32 ip, ip_to, p = 0, port, port_to; + u32 ip, ip_to = 0, p = 0, port, port_to; u32 ip2_from = 0, ip2_to, ip2_last, ip2; u32 timeout = h->timeout; bool with_ports = false; diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index 6b368be937c6..b62c4148b921 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -27,22 +27,17 @@ static DEFINE_MUTEX(nf_ct_ecache_mutex); -struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly; -EXPORT_SYMBOL_GPL(nf_conntrack_event_cb); - -struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly; -EXPORT_SYMBOL_GPL(nf_expect_event_cb); - /* deliver cached events and clear cache entry - must be called with locally * disabled softirqs */ void nf_ct_deliver_cached_events(struct nf_conn *ct) { + struct net *net = nf_ct_net(ct); unsigned long events; struct nf_ct_event_notifier *notify; struct nf_conntrack_ecache *e; rcu_read_lock(); - notify = rcu_dereference(nf_conntrack_event_cb); + notify = rcu_dereference(net->ct.nf_conntrack_event_cb); if (notify == NULL) goto out_unlock; @@ -83,19 +78,20 @@ 
out_unlock: } EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events); -int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new) +int nf_conntrack_register_notifier(struct net *net, + struct nf_ct_event_notifier *new) { int ret = 0; struct nf_ct_event_notifier *notify; mutex_lock(&nf_ct_ecache_mutex); - notify = rcu_dereference_protected(nf_conntrack_event_cb, + notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, lockdep_is_held(&nf_ct_ecache_mutex)); if (notify != NULL) { ret = -EBUSY; goto out_unlock; } - RCU_INIT_POINTER(nf_conntrack_event_cb, new); + RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new); mutex_unlock(&nf_ct_ecache_mutex); return ret; @@ -105,32 +101,34 @@ out_unlock: } EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier); -void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new) +void nf_conntrack_unregister_notifier(struct net *net, + struct nf_ct_event_notifier *new) { struct nf_ct_event_notifier *notify; mutex_lock(&nf_ct_ecache_mutex); - notify = rcu_dereference_protected(nf_conntrack_event_cb, + notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, lockdep_is_held(&nf_ct_ecache_mutex)); BUG_ON(notify != new); - RCU_INIT_POINTER(nf_conntrack_event_cb, NULL); + RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); mutex_unlock(&nf_ct_ecache_mutex); } EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); -int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new) +int nf_ct_expect_register_notifier(struct net *net, + struct nf_exp_event_notifier *new) { int ret = 0; struct nf_exp_event_notifier *notify; mutex_lock(&nf_ct_ecache_mutex); - notify = rcu_dereference_protected(nf_expect_event_cb, + notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, lockdep_is_held(&nf_ct_ecache_mutex)); if (notify != NULL) { ret = -EBUSY; goto out_unlock; } - RCU_INIT_POINTER(nf_expect_event_cb, new); + RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new); mutex_unlock(&nf_ct_ecache_mutex); return ret; @@ -140,15 +138,16 @@ out_unlock: } EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier); -void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new) +void nf_ct_expect_unregister_notifier(struct net *net, + struct nf_exp_event_notifier *new) { struct nf_exp_event_notifier *notify; mutex_lock(&nf_ct_ecache_mutex); - notify = rcu_dereference_protected(nf_expect_event_cb, + notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, lockdep_is_held(&nf_ct_ecache_mutex)); BUG_ON(notify != new); - RCU_INIT_POINTER(nf_expect_event_cb, NULL); + RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); mutex_unlock(&nf_ct_ecache_mutex); } EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index e58aa9b1fe8a..ef21b221f036 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -4,7 +4,7 @@ * (C) 2001 by Jay Schulist <jschlst@samba.org> * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> * (C) 2003 by Patrick Mchardy <kaber@trash.net> - * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org> + * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org> * * Initial connection tracking via netlink development funded and * generally made possible by Network Robots, Inc. 
(www.networkrobots.com) @@ -2163,6 +2163,54 @@ MODULE_ALIAS("ip_conntrack_netlink"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); +static int __net_init ctnetlink_net_init(struct net *net) +{ +#ifdef CONFIG_NF_CONNTRACK_EVENTS + int ret; + + ret = nf_conntrack_register_notifier(net, &ctnl_notifier); + if (ret < 0) { + pr_err("ctnetlink_init: cannot register notifier.\n"); + goto err_out; + } + + ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp); + if (ret < 0) { + pr_err("ctnetlink_init: cannot expect register notifier.\n"); + goto err_unreg_notifier; + } +#endif + return 0; + +#ifdef CONFIG_NF_CONNTRACK_EVENTS +err_unreg_notifier: + nf_conntrack_unregister_notifier(net, &ctnl_notifier); +err_out: + return ret; +#endif +} + +static void ctnetlink_net_exit(struct net *net) +{ +#ifdef CONFIG_NF_CONNTRACK_EVENTS + nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp); + nf_conntrack_unregister_notifier(net, &ctnl_notifier); +#endif +} + +static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list) +{ + struct net *net; + + list_for_each_entry(net, net_exit_list, exit_list) + ctnetlink_net_exit(net); +} + +static struct pernet_operations ctnetlink_net_ops = { + .init = ctnetlink_net_init, + .exit_batch = ctnetlink_net_exit_batch, +}; + static int __init ctnetlink_init(void) { int ret; @@ -2180,28 +2228,15 @@ static int __init ctnetlink_init(void) goto err_unreg_subsys; } -#ifdef CONFIG_NF_CONNTRACK_EVENTS - ret = nf_conntrack_register_notifier(&ctnl_notifier); - if (ret < 0) { - pr_err("ctnetlink_init: cannot register notifier.\n"); + if (register_pernet_subsys(&ctnetlink_net_ops)) { + pr_err("ctnetlink_init: cannot register pernet operations\n"); goto err_unreg_exp_subsys; } - ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp); - if (ret < 0) { - pr_err("ctnetlink_init: cannot expect register notifier.\n"); - goto err_unreg_notifier; - } -#endif - return 0; -#ifdef CONFIG_NF_CONNTRACK_EVENTS -err_unreg_notifier: - nf_conntrack_unregister_notifier(&ctnl_notifier); err_unreg_exp_subsys: nfnetlink_subsys_unregister(&ctnl_exp_subsys); -#endif err_unreg_subsys: nfnetlink_subsys_unregister(&ctnl_subsys); err_out: @@ -2213,11 +2248,7 @@ static void __exit ctnetlink_exit(void) pr_info("ctnetlink: unregistering from nfnetlink.\n"); nf_ct_remove_userspace_expectations(); -#ifdef CONFIG_NF_CONNTRACK_EVENTS - nf_ct_expect_unregister_notifier(&ctnl_notifier_exp); - nf_conntrack_unregister_notifier(&ctnl_notifier); -#endif - + unregister_pernet_subsys(&ctnetlink_net_ops); nfnetlink_subsys_unregister(&ctnl_exp_subsys); nfnetlink_subsys_unregister(&ctnl_subsys); } diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 9c24de10a657..824f184f7a9b 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain, struct netlbl_domaddr_map *addrmap = NULL; struct netlbl_domaddr4_map *map4 = NULL; struct netlbl_domaddr6_map *map6 = NULL; - const struct in_addr *addr4, *mask4; - const struct in6_addr *addr6, *mask6; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) @@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain, INIT_LIST_HEAD(&addrmap->list6); switch (family) { - case AF_INET: - addr4 = addr; - mask4 = mask; + case AF_INET: { + const struct in_addr *addr4 = addr; + const struct in_addr *mask4 = mask; map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); if (map4 == NULL) goto 
cfg_unlbl_map_add_failure; @@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain, if (ret_val != 0) goto cfg_unlbl_map_add_failure; break; - case AF_INET6: - addr6 = addr; - mask6 = mask; + } +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + case AF_INET6: { + const struct in6_addr *addr6 = addr; + const struct in6_addr *mask6 = mask; map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); if (map6 == NULL) goto cfg_unlbl_map_add_failure; @@ -162,11 +162,13 @@ int netlbl_cfg_unlbl_map_add(const char *domain, map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3]; ipv6_addr_copy(&map6->list.mask, mask6); map6->list.valid = 1; - ret_val = netlbl_af4list_add(&map4->list, - &addrmap->list4); + ret_val = netlbl_af6list_add(&map6->list, + &addrmap->list6); if (ret_val != 0) goto cfg_unlbl_map_add_failure; break; + } +#endif /* IPv6 */ default: goto cfg_unlbl_map_add_failure; break; @@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net, case AF_INET: addr_len = sizeof(struct in_addr); break; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: addr_len = sizeof(struct in6_addr); break; +#endif /* IPv6 */ default: return -EPFNOSUPPORT; } @@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net, case AF_INET: addr_len = sizeof(struct in_addr); break; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: addr_len = sizeof(struct in6_addr); break; +#endif /* IPv6 */ default: return -EPFNOSUPPORT; } diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 6649463da1b6..d617161f8dd3 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) ctl->Plog, ctl->Scell_log, nla_data(tb[TCA_RED_STAB])); - if (skb_queue_empty(&sch->q)) - red_end_of_idle_period(&q->parms); + if (!q->qdisc->q.qlen) + red_start_of_idle_period(&q->parms); sch_tree_unlock(sch); return 0; diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index a3b7120fcc74..4f4c52c0eeb3 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) static int -__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) +__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, + struct net_device *dev, struct netdev_queue *txq, + struct neighbour *mn) { - struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); - struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); - struct neighbour *mn = dst_get_neighbour(skb_dst(skb)); + struct teql_sched_data *q = qdisc_priv(txq->qdisc); struct neighbour *n = q->ncache; if (mn->tbl == NULL) @@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * } static inline int teql_resolve(struct sk_buff *skb, - struct sk_buff *skb_res, struct net_device *dev) + struct sk_buff *skb_res, + struct net_device *dev, + struct netdev_queue *txq) { - struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); + struct dst_entry *dst = skb_dst(skb); + struct neighbour *mn; + int res; + if (txq->qdisc == &noop_qdisc) return -ENODEV; - if (dev->header_ops == NULL || - skb_dst(skb) == NULL || - dst_get_neighbour(skb_dst(skb)) == NULL) + if (!dev->header_ops || !dst) return 0; - return __teql_resolve(skb, skb_res, dev); + + rcu_read_lock(); + mn = dst_get_neighbour(dst); + res = mn ? 
__teql_resolve(skb, skb_res, dev, txq, mn) : 0; + rcu_read_unlock(); + + return res; } static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) @@ -307,7 +316,7 @@ restart: continue; } - switch (teql_resolve(skb, skb_res, slave)) { + switch (teql_resolve(skb, skb_res, slave, slave_txq)) { case 0: if (__netif_tx_trylock(slave_txq)) { unsigned int length = qdisc_pkt_len(skb); diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 865e68fef21c..bf812048cf6f 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp) struct sctp_auth_bytes *key; /* Verify that we are not going to overflow INT_MAX */ - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes)) + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes))) return NULL; /* Allocate the shared key */ diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 466fbcc5cf77..b595a3d8679f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, if ((UNIXCB(skb).pid != siocb->scm->pid) || (UNIXCB(skb).cred != siocb->scm->cred)) { skb_queue_head(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk, skb->len); break; } } else { @@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, chunk = min_t(unsigned int, skb->len, size); if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk, skb->len); if (copied == 0) copied = -EFAULT; break; @@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, /* put the skb back if we didn't use it up.. */ if (skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk, skb->len); break; } @@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, /* put message back and return */ skb_queue_head(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk, skb->len); break; } } while (size); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b3a476fe8272..ffafda5022c2 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, - [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN }, - [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN }, + [NL80211_ATTR_MAC] = { .len = ETH_ALEN }, + [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN }, [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index e71f5a66574e..3302c56f60d1 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -57,8 +57,17 @@ #define REG_DBG_PRINT(args...) 
#endif +static struct regulatory_request core_request_world = { + .initiator = NL80211_REGDOM_SET_BY_CORE, + .alpha2[0] = '0', + .alpha2[1] = '0', + .intersect = false, + .processed = true, + .country_ie_env = ENVIRON_ANY, +}; + /* Receipt of information from last regulatory request */ -static struct regulatory_request *last_request; +static struct regulatory_request *last_request = &core_request_world; /* To trigger userspace events */ static struct platform_device *reg_pdev; @@ -150,7 +159,7 @@ static char user_alpha2[2]; module_param(ieee80211_regdom, charp, 0444); MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); -static void reset_regdomains(void) +static void reset_regdomains(bool full_reset) { /* avoid freeing static information or freeing something twice */ if (cfg80211_regdomain == cfg80211_world_regdom) @@ -165,6 +174,13 @@ static void reset_regdomains(void) cfg80211_world_regdom = &world_regdom; cfg80211_regdomain = NULL; + + if (!full_reset) + return; + + if (last_request != &core_request_world) + kfree(last_request); + last_request = &core_request_world; } /* @@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd) { BUG_ON(!last_request); - reset_regdomains(); + reset_regdomains(false); cfg80211_world_regdom = rd; cfg80211_regdomain = rd; @@ -1407,7 +1423,8 @@ static int __regulatory_hint(struct wiphy *wiphy, } new_request: - kfree(last_request); + if (last_request != &core_request_world) + kfree(last_request); last_request = pending_request; last_request->intersect = intersect; @@ -1577,9 +1594,6 @@ static int regulatory_hint_core(const char *alpha2) { struct regulatory_request *request; - kfree(last_request); - last_request = NULL; - request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) @@ -1777,7 +1791,7 @@ static void restore_regulatory_settings(bool reset_user) mutex_lock(&cfg80211_mutex); mutex_lock(®_mutex); - reset_regdomains(); + reset_regdomains(true); restore_alpha2(alpha2, reset_user); /* @@ -2037,12 +2051,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) } request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); + if (!request_wiphy && + (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER || + last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) { + schedule_delayed_work(®_timeout, 0); + return -ENODEV; + } if (!last_request->intersect) { int r; if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) { - reset_regdomains(); + reset_regdomains(false); cfg80211_regdomain = rd; return 0; } @@ -2063,7 +2083,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) if (r) return r; - reset_regdomains(); + reset_regdomains(false); cfg80211_regdomain = rd; return 0; } @@ -2088,7 +2108,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) rd = NULL; - reset_regdomains(); + reset_regdomains(false); cfg80211_regdomain = intersected_rd; return 0; @@ -2108,7 +2128,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd) kfree(rd); rd = NULL; - reset_regdomains(); + reset_regdomains(false); cfg80211_regdomain = intersected_rd; return 0; @@ -2261,11 +2281,8 @@ void /* __init_or_exit */ regulatory_exit(void) mutex_lock(&cfg80211_mutex); mutex_lock(®_mutex); - reset_regdomains(); - - kfree(last_request); + reset_regdomains(true); - last_request = NULL; dev_set_uevent_suppress(®_pdev->dev, true); platform_device_unregister(reg_pdev); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 
552df27dcf53..2118d6446630 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2382,9 +2382,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst) return dst_metric_advmss(dst->path); } -static unsigned int xfrm_default_mtu(const struct dst_entry *dst) +static unsigned int xfrm_mtu(const struct dst_entry *dst) { - return dst_mtu(dst->path); + unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); + + return mtu ? : dst_mtu(dst->path); } static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr) @@ -2411,8 +2413,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->check = xfrm_dst_check; if (likely(dst_ops->default_advmss == NULL)) dst_ops->default_advmss = xfrm_default_advmss; - if (likely(dst_ops->default_mtu == NULL)) - dst_ops->default_mtu = xfrm_default_mtu; + if (likely(dst_ops->mtu == NULL)) + dst_ops->mtu = xfrm_mtu; if (likely(dst_ops->negative_advice == NULL)) dst_ops->negative_advice = xfrm_negative_advice; if (likely(dst_ops->link_failure == NULL)) diff --git a/security/apparmor/path.c b/security/apparmor/path.c index 36cc0cc39e78..b566eba4a65c 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c @@ -57,23 +57,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen) static int d_namespace_path(struct path *path, char *buf, int buflen, char **name, int flags) { - struct path root, tmp; char *res; - int connected, error = 0; + int error = 0; + int connected = 1; + + if (path->mnt->mnt_flags & MNT_INTERNAL) { + /* it's not mounted anywhere */ + res = dentry_path(path->dentry, buf, buflen); + *name = res; + if (IS_ERR(res)) { + *name = buf; + return PTR_ERR(res); + } + if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC && + strncmp(*name, "/sys/", 5) == 0) { + /* TODO: convert over to using a per namespace + * control instead of hard coded /proc + */ + return prepend(name, *name - buf, "/proc", 5); + } + return 0; + } - /* Get the root we want to resolve too, released below */ + /* resolve paths relative to chroot?*/ if (flags & PATH_CHROOT_REL) { - /* resolve paths relative to chroot */ + struct path root; get_fs_root(current->fs, &root); - } else { - /* resolve paths relative to namespace */ - root.mnt = current->nsproxy->mnt_ns->root; - root.dentry = root.mnt->mnt_root; - path_get(&root); + res = __d_path(path, &root, buf, buflen); + if (res && !IS_ERR(res)) { + /* everything's fine */ + *name = res; + path_put(&root); + goto ok; + } + path_put(&root); + connected = 0; } - tmp = root; - res = __d_path(path, &tmp, buf, buflen); + res = d_absolute_path(path, buf, buflen); *name = res; /* handle error conditions - and still allow a partial path to @@ -84,7 +105,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, *name = buf; goto out; } + if (!our_mnt(path->mnt)) + connected = 0; +ok: /* Handle two cases: * 1. A deleted dentry && profile is not allowing mediation of deleted * 2. On some filesystems, newly allocated dentries appear to the @@ -97,10 +121,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, goto out; } - /* Determine if the path is connected to the expected root */ - connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt; - - /* If the path is not connected, + /* If the path is not connected to the expected root, * check if it is a sysctl and handle specially else remove any * leading / that __d_path may have returned. 
* Unless @@ -112,17 +133,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, * namespace root. */ if (!connected) { - /* is the disconnect path a sysctl? */ - if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC && - strncmp(*name, "/sys/", 5) == 0) { - /* TODO: convert over to using a per namespace - * control instead of hard coded /proc - */ - error = prepend(name, *name - buf, "/proc", 5); - } else if (!(flags & PATH_CONNECT_PATH) && + if (!(flags & PATH_CONNECT_PATH) && !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && - (tmp.mnt == current->nsproxy->mnt_ns->root && - tmp.dentry == tmp.mnt->mnt_root))) { + our_mnt(path->mnt))) { /* disconnected path, don't return pathname starting * with '/' */ @@ -133,8 +146,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, } out: - path_put(&root); - return error; } diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c index 738bbdf8d4c7..d9f3ced8756e 100644 --- a/security/tomoyo/realpath.c +++ b/security/tomoyo/realpath.c @@ -101,9 +101,8 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer, { char *pos = ERR_PTR(-ENOMEM); if (buflen >= 256) { - struct path ns_root = { }; /* go to whatever namespace root we are under */ - pos = __d_path(path, &ns_root, buffer, buflen - 1); + pos = d_absolute_path(path, buffer, buflen - 1); if (!IS_ERR(pos) && *pos == '/' && pos[1]) { struct inode *inode = path->dentry->d_inode; if (inode && S_ISDIR(inode->i_mode)) { @@ -294,8 +293,16 @@ char *tomoyo_realpath_from_path(struct path *path) pos = tomoyo_get_local_path(path->dentry, buf, buf_len - 1); /* Get absolute name for the rest. */ - else + else { pos = tomoyo_get_absolute_path(path, buf, buf_len - 1); + /* + * Fall back to local name if absolute name is not + * available. + */ + if (pos == ERR_PTR(-EINVAL)) + pos = tomoyo_get_local_path(path->dentry, buf, + buf_len - 1); + } encode: if (IS_ERR(pos)) continue; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 096507d2ca9a..7d98240def0b 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2508,7 +2508,6 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), - SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB), SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index cbde019d3d52..1d07e8fa2433 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -297,6 +297,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx, imux = &spec->input_mux[mux_idx]; if (!imux->num_items && mux_idx > 0) imux = &spec->input_mux[0]; + if (!imux->num_items) + return 0; if (idx >= imux->num_items) idx = imux->num_items - 1; @@ -2629,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch, case AUTO_PIN_SPEAKER_OUT: if (cfg->line_outs == 1) return "Speaker"; + if (cfg->line_outs == 2) + return ch ? 
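The tomoyo_realpath_from_path() hunk above retries with a local name when the absolute name comes back as -EINVAL, relying on the kernel's pointer-encoded error convention. The sketch below re-creates that convention in plain C to show the shape of the fallback; get_absolute_name() and get_local_name() are made-up stand-ins for the real helpers:

    #include <errno.h>
    #include <stdio.h>

    /* Simplified forms of the kernel's ERR_PTR()/IS_ERR() helpers. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-4095;
    }

    /* Hypothetical stand-ins for the absolute/local path helpers. */
    static char *get_absolute_name(char *buf, int len)
    {
        (void)buf; (void)len;
        return ERR_PTR(-EINVAL);        /* pretend no absolute name is available */
    }

    static char *get_local_name(char *buf, int len)
    {
        snprintf(buf, len, "/local/fallback");
        return buf;
    }

    int main(void)
    {
        char buf[64];
        char *pos = get_absolute_name(buf, sizeof(buf));

        /* The fix: retry with the local name for this one specific error. */
        if (pos == ERR_PTR(-EINVAL))
            pos = get_local_name(buf, sizeof(buf));
        if (!IS_ERR(pos))
            printf("%s\n", pos);
        return 0;
    }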
"Bass Speaker" : "Speaker"; break; case AUTO_PIN_HP_OUT: /* for multi-io case, only the primary out */ @@ -2902,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin) if (!nid) continue; if (found_in_nid_list(nid, spec->multiout.dac_nids, - spec->multiout.num_dacs)) + ARRAY_SIZE(spec->private_dac_nids))) continue; if (found_in_nid_list(nid, spec->multiout.hp_out_nid, ARRAY_SIZE(spec->multiout.hp_out_nid))) @@ -2923,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin) return 0; } +/* return 0 if no possible DAC is found, 1 if one or more found */ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs, const hda_nid_t *pins, hda_nid_t *dacs) { @@ -2940,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs, if (!dacs[i]) dacs[i] = alc_auto_look_for_dac(codec, pins[i]); } - return 0; + return 1; } static int alc_auto_fill_multi_ios(struct hda_codec *codec, @@ -2950,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec, static int alc_auto_fill_dac_nids(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; - const struct auto_pin_cfg *cfg = &spec->autocfg; + struct auto_pin_cfg *cfg = &spec->autocfg; bool redone = false; int i; @@ -2961,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec) spec->multiout.extra_out_nid[0] = 0; memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids)); spec->multiout.dac_nids = spec->private_dac_nids; + spec->multi_ios = 0; /* fill hard-wired DACs first */ if (!redone) { @@ -2994,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec) for (i = 0; i < cfg->line_outs; i++) { if (spec->private_dac_nids[i]) spec->multiout.num_dacs++; - else + else { memmove(spec->private_dac_nids + i, spec->private_dac_nids + i + 1, sizeof(hda_nid_t) * (cfg->line_outs - i - 1)); + spec->private_dac_nids[cfg->line_outs - 1] = 0; + } } if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) { @@ -3019,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec) if (cfg->line_out_type != AUTO_PIN_HP_OUT) alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins, spec->multiout.hp_out_nid); - if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) - alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins, - spec->multiout.extra_out_nid); + if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) { + int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, + cfg->speaker_pins, + spec->multiout.extra_out_nid); + /* if no speaker volume is assigned, try again as the primary + * output + */ + if (!err && cfg->speaker_outs > 0 && + cfg->line_out_type == AUTO_PIN_HP_OUT) { + cfg->hp_outs = cfg->line_outs; + memcpy(cfg->hp_pins, cfg->line_out_pins, + sizeof(cfg->hp_pins)); + cfg->line_outs = cfg->speaker_outs; + memcpy(cfg->line_out_pins, cfg->speaker_pins, + sizeof(cfg->speaker_pins)); + cfg->speaker_outs = 0; + memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins)); + cfg->line_out_type = AUTO_PIN_SPEAKER_OUT; + redone = false; + goto again; + } + } return 0; } @@ -3171,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec, } static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin, - hda_nid_t dac, const char *pfx) + hda_nid_t dac, const char *pfx, + int cidx) { struct alc_spec *spec = codec->spec; hda_nid_t sw, vol; @@ -3187,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin, if 
(is_ctl_used(spec->sw_ctls, val)) return 0; /* already created */ mark_ctl_usage(spec->sw_ctls, val); - return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val); + return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val); } sw = alc_look_for_out_mute_nid(codec, pin, dac); vol = alc_look_for_out_vol_nid(codec, pin, dac); - err = alc_auto_add_stereo_vol(codec, pfx, 0, vol); + err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol); if (err < 0) return err; - err = alc_auto_add_stereo_sw(codec, pfx, 0, sw); + err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw); if (err < 0) return err; return 0; @@ -3236,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins, hda_nid_t dac = *dacs; if (!dac) dac = spec->multiout.dac_nids[0]; - return alc_auto_create_extra_out(codec, *pins, dac, pfx); + return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0); } if (dacs[num_pins - 1]) { /* OK, we have a multi-output system with individual volumes */ for (i = 0; i < num_pins; i++) { - snprintf(name, sizeof(name), "%s %s", - pfx, channel_name[i]); - err = alc_auto_create_extra_out(codec, pins[i], dacs[i], - name); + if (num_pins >= 3) { + snprintf(name, sizeof(name), "%s %s", + pfx, channel_name[i]); + err = alc_auto_create_extra_out(codec, pins[i], dacs[i], + name, 0); + } else { + err = alc_auto_create_extra_out(codec, pins[i], dacs[i], + pfx, i); + } if (err < 0) return err; } diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index f3658658548e..eeb25d529e30 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -215,6 +215,7 @@ struct sigmatel_spec { unsigned int gpio_mute; unsigned int gpio_led; unsigned int gpio_led_polarity; + unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */ unsigned int vref_led; /* stream */ @@ -4318,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec) spec->eapd_switch = val; get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity); if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) { - if (spec->gpio_led <= 8) { - spec->gpio_mask |= spec->gpio_led; - spec->gpio_dir |= spec->gpio_led; - if (spec->gpio_led_polarity) - spec->gpio_data |= spec->gpio_led; - } + spec->gpio_mask |= spec->gpio_led; + spec->gpio_dir |= spec->gpio_led; + if (spec->gpio_led_polarity) + spec->gpio_data |= spec->gpio_led; } } @@ -4441,7 +4440,9 @@ static int stac92xx_init(struct hda_codec *codec) int pinctl, def_conf; /* power on when no jack detection is available */ - if (!spec->hp_detect) { + /* or when the VREF is used for controlling LED */ + if (!spec->hp_detect || + spec->vref_mute_led_nid == nid) { stac_toggle_power_map(codec, nid, 1); continue; } @@ -4913,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity) if (sscanf(dev->name, "HP_Mute_LED_%d_%x", &spec->gpio_led_polarity, &spec->gpio_led) == 2) { - if (spec->gpio_led < 4) + unsigned int max_gpio; + max_gpio = snd_hda_param_read(codec, codec->afg, + AC_PAR_GPIO_CAP); + max_gpio &= AC_GPIO_IO_COUNT; + if (spec->gpio_led < max_gpio) spec->gpio_led = 1 << spec->gpio_led; + else + spec->vref_mute_led_nid = spec->gpio_led; return 1; } if (sscanf(dev->name, "HP_Mute_LED_%d", @@ -5043,29 +5050,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec) struct sigmatel_spec *spec = codec->spec; /* sync mute LED */ - if (spec->gpio_led) { - if (spec->gpio_led <= 8) { - stac_gpio_set(codec, spec->gpio_mask, - spec->gpio_dir, spec->gpio_data); - } else { - 
stac_vrefout_set(codec, - spec->gpio_led, spec->vref_led); - } - } - return 0; -} - -static int stac92xx_post_suspend(struct hda_codec *codec) -{ - struct sigmatel_spec *spec = codec->spec; - if (spec->gpio_led > 8) { - /* with vref-out pin used for mute led control - * codec AFG is prevented from D3 state, but on - * system suspend it can (and should) be used - */ - snd_hda_codec_read(codec, codec->afg, 0, - AC_VERB_SET_POWER_STATE, AC_PWRST_D3); - } + if (spec->vref_mute_led_nid) + stac_vrefout_set(codec, spec->vref_mute_led_nid, + spec->vref_led); + else if (spec->gpio_led) + stac_gpio_set(codec, spec->gpio_mask, + spec->gpio_dir, spec->gpio_data); return 0; } @@ -5076,7 +5066,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg, struct sigmatel_spec *spec = codec->spec; if (power_state == AC_PWRST_D3) { - if (spec->gpio_led > 8) { + if (spec->vref_mute_led_nid) { /* with vref-out pin used for mute led control * codec AFG is prevented from D3 state */ @@ -5129,7 +5119,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec) } } /*polarity defines *not* muted state level*/ - if (spec->gpio_led <= 8) { + if (!spec->vref_mute_led_nid) { if (muted) spec->gpio_data &= ~spec->gpio_led; /* orange */ else @@ -5147,7 +5137,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec) muted_lvl = spec->gpio_led_polarity ? AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ; spec->vref_led = muted ? muted_lvl : notmtd_lvl; - stac_vrefout_set(codec, spec->gpio_led, spec->vref_led); + stac_vrefout_set(codec, spec->vref_mute_led_nid, + spec->vref_led); } return 0; } @@ -5661,15 +5652,13 @@ again: #ifdef CONFIG_SND_HDA_POWER_SAVE if (spec->gpio_led) { - if (spec->gpio_led <= 8) { + if (!spec->vref_mute_led_nid) { spec->gpio_mask |= spec->gpio_led; spec->gpio_dir |= spec->gpio_led; spec->gpio_data |= spec->gpio_led; } else { codec->patch_ops.set_power_state = stac92xx_set_power_state; - codec->patch_ops.post_suspend = - stac92xx_post_suspend; } codec->patch_ops.pre_resume = stac92xx_pre_resume; codec->patch_ops.check_power_status = @@ -5976,15 +5965,13 @@ again: #ifdef CONFIG_SND_HDA_POWER_SAVE if (spec->gpio_led) { - if (spec->gpio_led <= 8) { + if (!spec->vref_mute_led_nid) { spec->gpio_mask |= spec->gpio_led; spec->gpio_dir |= spec->gpio_led; spec->gpio_data |= spec->gpio_led; } else { codec->patch_ops.set_power_state = stac92xx_set_power_state; - codec->patch_ops.post_suspend = - stac92xx_post_suspend; } codec->patch_ops.pre_resume = stac92xx_pre_resume; codec->patch_ops.check_power_status = diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c index a391e622a192..28dfafb56dd1 100644 --- a/sound/pci/sis7019.c +++ b/sound/pci/sis7019.c @@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}"); static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ static int enable = 1; +static int codecs = 1; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator."); @@ -48,6 +49,8 @@ module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator."); module_param(enable, bool, 0444); MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator."); +module_param(codecs, int, 0444); +MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)"); static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) }, @@ -140,6 +143,9 @@ struct sis7019 { dma_addr_t 
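The sis7019 changes above add a "codecs" module parameter so users can declare which AC97 codecs the board actually has; the probe path later masks it to the three valid bits and defaults to the primary codec. Condensed into one kernel-flavoured fragment, with the SIS_* macro names shortened:

    #include <linux/module.h>

    /* Bit values mirroring SIS_PRIMARY/SECONDARY/TERTIARY_CODEC_PRESENT. */
    #define PRIMARY_CODEC_PRESENT   0x0001
    #define SECONDARY_CODEC_PRESENT 0x0002
    #define TERTIARY_CODEC_PRESENT  0x0004

    static int codecs = 1;
    module_param(codecs, int, 0444);
    MODULE_PARM_DESC(codecs, "Bitmask of codecs expected to be present (default 1 = primary only)");

    /* Called from probe: keep only valid bits, fall back to the primary codec. */
    static void sanitize_codecs_param(void)
    {
        codecs &= PRIMARY_CODEC_PRESENT | SECONDARY_CODEC_PRESENT |
                  TERTIARY_CODEC_PRESENT;
        if (!codecs)
            codecs = PRIMARY_CODEC_PRESENT;
    }

On a board with two codecs the module would typically be loaded with something like codecs=3.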
silence_dma_addr; }; +/* These values are also used by the module param 'codecs' to indicate + * which codecs should be present. + */ #define SIS_PRIMARY_CODEC_PRESENT 0x0001 #define SIS_SECONDARY_CODEC_PRESENT 0x0002 #define SIS_TERTIARY_CODEC_PRESENT 0x0004 @@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis) { unsigned long io = sis->ioport; void __iomem *ioaddr = sis->ioaddr; + unsigned long timeout; u16 status; int count; int i; @@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis) while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count) udelay(1); + /* Command complete, we can let go of the semaphore now. + */ + outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA); + if (!count) + return -EIO; + /* Now that we've finished the reset, find out what's attached. + * There are some codec/board combinations that take an extremely + * long time to come up. 350+ ms has been observed in the field, + * so we'll give them up to 500ms. */ - status = inl(io + SIS_AC97_STATUS); - if (status & SIS_AC97_STATUS_CODEC_READY) - sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT; - if (status & SIS_AC97_STATUS_CODEC2_READY) - sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT; - if (status & SIS_AC97_STATUS_CODEC3_READY) - sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT; - - /* All done, let go of the semaphore, and check for errors + sis->codecs_present = 0; + timeout = msecs_to_jiffies(500) + jiffies; + while (time_before_eq(jiffies, timeout)) { + status = inl(io + SIS_AC97_STATUS); + if (status & SIS_AC97_STATUS_CODEC_READY) + sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT; + if (status & SIS_AC97_STATUS_CODEC2_READY) + sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT; + if (status & SIS_AC97_STATUS_CODEC3_READY) + sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT; + + if (sis->codecs_present == codecs) + break; + + msleep(1); + } + + /* All done, check for errors. */ - outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA); - if (!sis->codecs_present || !count) + if (!sis->codecs_present) { + printk(KERN_ERR "sis7019: could not find any codecs\n"); return -EIO; + } + + if (sis->codecs_present != codecs) { + printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n", + sis->codecs_present, codecs); + } /* Let the hardware know that the audio driver is alive, * and enable PCM slots on the AC-link for L/R playback (3 & 4) and @@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci, if (!enable) goto error_out; + /* The user can specify which codecs should be present so that we + * can wait for them to show up if they are slow to recover from + * the AC97 cold reset. We default to a single codec, the primary. + * + * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2. + */ + codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT | + SIS_TERTIARY_CODEC_PRESENT; + if (!codecs) + codecs = SIS_PRIMARY_CODEC_PRESENT; + rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card); if (rc < 0) goto error_out; diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig index bee3c94f58b0..d1fcc816ce97 100644 --- a/sound/soc/atmel/Kconfig +++ b/sound/soc/atmel/Kconfig @@ -1,6 +1,6 @@ config SND_ATMEL_SOC tristate "SoC Audio for the Atmel System-on-Chip" - depends on ARCH_AT91 || AVR32 + depends on ARCH_AT91 help Say Y or M if you want to add support for codecs attached to the ATMEL SSC interface. 
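The sis_chip_init() rework above releases the AC97 semaphore as soon as the reset command completes and then polls for codec readiness for up to 500 ms, since some codec/board combinations need 350+ ms to come up. A kernel-flavoured sketch of that bounded polling loop; the status bits are stand-ins and the register read is supplied by the caller rather than taken from the real driver:

    #include <linux/jiffies.h>
    #include <linux/delay.h>

    #define CODEC0_READY 0x1        /* illustrative status bits */
    #define CODEC1_READY 0x2
    #define CODEC2_READY 0x4

    /* Poll until the expected codecs report ready or 500 ms have passed. */
    static unsigned int wait_for_codecs(unsigned int expected,
                                        unsigned int (*read_status)(void))
    {
        unsigned long timeout = jiffies + msecs_to_jiffies(500);
        unsigned int present = 0;

        while (time_before_eq(jiffies, timeout)) {
            unsigned int status = read_status();

            if (status & CODEC0_READY)
                present |= 0x1;
            if (status & CODEC1_READY)
                present |= 0x2;
            if (status & CODEC2_READY)
                present |= 0x4;
            if (present == expected)
                break;
            msleep(1);
        }
        return present;
    }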
You will also need @@ -24,25 +24,6 @@ config SND_AT91_SOC_SAM9G20_WM8731 Say Y if you want to add support for SoC audio on WM8731-based AT91sam9g20 evaluation board. -config SND_AT32_SOC_PLAYPAQ - tristate "SoC Audio support for PlayPaq with WM8510" - depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS - select SND_ATMEL_SOC_SSC - select SND_SOC_WM8510 - help - Say Y or M here if you want to add support for SoC audio - on the LRS PlayPaq. - -config SND_AT32_SOC_PLAYPAQ_SLAVE - bool "Run CODEC on PlayPaq in slave mode" - depends on SND_AT32_SOC_PLAYPAQ - default n - help - Say Y if you want to run with the AT32 SSC generating the BCLK - and FRAME signals on the PlayPaq. Unless you want to play - with the AT32 as the SSC master, you probably want to say N here, - as this will give you better sound quality. - config SND_AT91_SOC_AFEB9260 tristate "SoC Audio support for AFEB9260 board" depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile index e7ea56bd5f82..a5c0bf19da78 100644 --- a/sound/soc/atmel/Makefile +++ b/sound/soc/atmel/Makefile @@ -8,9 +8,5 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o # AT91 Machine Support snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o -# AT32 Machine Support -snd-soc-playpaq-objs := playpaq_wm8510.o - obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o -obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c deleted file mode 100644 index 73ae99ad4578..000000000000 --- a/sound/soc/atmel/playpaq_wm8510.c +++ /dev/null @@ -1,473 +0,0 @@ -/* sound/soc/at32/playpaq_wm8510.c - * ASoC machine driver for PlayPaq using WM8510 codec - * - * Copyright (C) 2008 Long Range Systems - * Geoffrey Wossum <gwossum@acm.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c - * - * NOTE: If you don't have the AT32 enhanced portmux configured (which - * isn't currently in the mainline or Atmel patched kernel), you will - * need to set the MCLK pin (PA30) to peripheral A in your board initialization - * code. 
Something like: - * at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0); - * - */ - -/* #define DEBUG */ - -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/clk.h> -#include <linux/timer.h> -#include <linux/interrupt.h> -#include <linux/platform_device.h> - -#include <sound/core.h> -#include <sound/pcm.h> -#include <sound/pcm_params.h> -#include <sound/soc.h> - -#include <mach/at32ap700x.h> -#include <mach/portmux.h> - -#include "../codecs/wm8510.h" -#include "atmel-pcm.h" -#include "atmel_ssc_dai.h" - - -/*-------------------------------------------------------------------------*\ - * constants -\*-------------------------------------------------------------------------*/ -#define MCLK_PIN GPIO_PIN_PA(30) -#define MCLK_PERIPH GPIO_PERIPH_A - - -/*-------------------------------------------------------------------------*\ - * data types -\*-------------------------------------------------------------------------*/ -/* SSC clocking data */ -struct ssc_clock_data { - /* CMR div */ - unsigned int cmr_div; - - /* Frame period (as needed by xCMR.PERIOD) */ - unsigned int period; - - /* The SSC clock rate these settings where calculated for */ - unsigned long ssc_rate; -}; - - -/*-------------------------------------------------------------------------*\ - * module data -\*-------------------------------------------------------------------------*/ -static struct clk *_gclk0; -static struct clk *_pll0; - -#define CODEC_CLK (_gclk0) - - -/*-------------------------------------------------------------------------*\ - * Sound SOC operations -\*-------------------------------------------------------------------------*/ -#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE -static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock( - struct snd_pcm_hw_params *params, - struct snd_soc_dai *cpu_dai) -{ - struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai); - struct ssc_device *ssc = ssc_p->ssc; - struct ssc_clock_data cd; - unsigned int rate, width_bits, channels; - unsigned int bitrate, ssc_div; - unsigned actual_rate; - - - /* - * Figure out required bitrate - */ - rate = params_rate(params); - channels = params_channels(params); - width_bits = snd_pcm_format_physical_width(params_format(params)); - bitrate = rate * width_bits * channels; - - - /* - * Figure out required SSC divider and period for required bitrate - */ - cd.ssc_rate = clk_get_rate(ssc->clk); - ssc_div = cd.ssc_rate / bitrate; - cd.cmr_div = ssc_div / 2; - if (ssc_div & 1) { - /* round cmr_div up */ - cd.cmr_div++; - } - cd.period = width_bits - 1; - - - /* - * Find actual rate, compare to requested rate - */ - actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1)); - pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n", - rate, actual_rate); - - - return cd; -} -#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */ - - - -static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params) -{ - struct snd_soc_pcm_runtime *rtd = substream->private_data; - struct snd_soc_dai *codec_dai = rtd->codec_dai; - struct snd_soc_dai *cpu_dai = rtd->cpu_dai; - struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai); - struct ssc_device *ssc = ssc_p->ssc; - unsigned int pll_out = 0, bclk = 0, mclk_div = 0; - int ret; - - - /* Due to difficulties with getting the correct clocks from the AT32's - * PLL0, we're going to let the CODEC be in charge of all the clocks - */ -#if !defined 
CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE - const unsigned int fmt = (SND_SOC_DAIFMT_I2S | - SND_SOC_DAIFMT_NB_NF | - SND_SOC_DAIFMT_CBM_CFM); -#else - struct ssc_clock_data cd; - const unsigned int fmt = (SND_SOC_DAIFMT_I2S | - SND_SOC_DAIFMT_NB_NF | - SND_SOC_DAIFMT_CBS_CFS); -#endif - - if (ssc == NULL) { - pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n"); - return -EINVAL; - } - - - /* - * Figure out PLL and BCLK dividers for WM8510 - */ - switch (params_rate(params)) { - case 48000: - pll_out = 24576000; - mclk_div = WM8510_MCLKDIV_2; - bclk = WM8510_BCLKDIV_8; - break; - - case 44100: - pll_out = 22579200; - mclk_div = WM8510_MCLKDIV_2; - bclk = WM8510_BCLKDIV_8; - break; - - case 22050: - pll_out = 22579200; - mclk_div = WM8510_MCLKDIV_4; - bclk = WM8510_BCLKDIV_8; - break; - - case 16000: - pll_out = 24576000; - mclk_div = WM8510_MCLKDIV_6; - bclk = WM8510_BCLKDIV_8; - break; - - case 11025: - pll_out = 22579200; - mclk_div = WM8510_MCLKDIV_8; - bclk = WM8510_BCLKDIV_8; - break; - - case 8000: - pll_out = 24576000; - mclk_div = WM8510_MCLKDIV_12; - bclk = WM8510_BCLKDIV_8; - break; - - default: - pr_warning("playpaq_wm8510: Unsupported sample rate %d\n", - params_rate(params)); - return -EINVAL; - } - - - /* - * set CPU and CODEC DAI configuration - */ - ret = snd_soc_dai_set_fmt(codec_dai, fmt); - if (ret < 0) { - pr_warning("playpaq_wm8510: " - "Failed to set CODEC DAI format (%d)\n", - ret); - return ret; - } - ret = snd_soc_dai_set_fmt(cpu_dai, fmt); - if (ret < 0) { - pr_warning("playpaq_wm8510: " - "Failed to set CPU DAI format (%d)\n", - ret); - return ret; - } - - - /* - * Set CPU clock configuration - */ -#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE - cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai); - pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n", - cd.cmr_div, cd.period); - ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div); - if (ret < 0) { - pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n", - ret); - return ret; - } - ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD, - cd.period); - if (ret < 0) { - pr_warning("playpaq_wm8510: " - "Failed to set CPU transmit period (%d)\n", - ret); - return ret; - } -#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */ - - - /* - * Set CODEC clock configuration - */ - pr_debug("playpaq_wm8510: " - "pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n", - clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div); - - -#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE - ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk); - if (ret < 0) { - pr_warning - ("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n", - ret); - return ret; - } -#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */ - - - ret = snd_soc_dai_set_pll(codec_dai, 0, 0, - clk_get_rate(CODEC_CLK), pll_out); - if (ret < 0) { - pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n", - ret); - return ret; - } - - - ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div); - if (ret < 0) { - pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n", - ret); - return ret; - } - - - return 0; -} - - - -static struct snd_soc_ops playpaq_wm8510_ops = { - .hw_params = playpaq_wm8510_hw_params, -}; - - - -static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = { - SND_SOC_DAPM_MIC("Int Mic", NULL), - SND_SOC_DAPM_SPK("Ext Spk", NULL), -}; - - - -static const struct snd_soc_dapm_route intercon[] = { - /* speaker connected to SPKOUT */ - {"Ext Spk", NULL, "SPKOUTP"}, - {"Ext Spk", NULL, "SPKOUTN"}, - - {"Mic Bias", 
NULL, "Int Mic"}, - {"MICN", NULL, "Mic Bias"}, - {"MICP", NULL, "Mic Bias"}, -}; - - - -static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd) -{ - struct snd_soc_codec *codec = rtd->codec; - struct snd_soc_dapm_context *dapm = &codec->dapm; - int i; - - /* - * Add DAPM widgets - */ - for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++) - snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]); - - - - /* - * Setup audio path interconnects - */ - snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); - - - - /* always connected pins */ - snd_soc_dapm_enable_pin(dapm, "Int Mic"); - snd_soc_dapm_enable_pin(dapm, "Ext Spk"); - - - - /* Make CSB show PLL rate */ - snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV, - WM8510_OPCLKDIV_1 | 4); - - return 0; -} - - - -static struct snd_soc_dai_link playpaq_wm8510_dai = { - .name = "WM8510", - .stream_name = "WM8510 PCM", - .cpu_dai_name= "atmel-ssc-dai.0", - .platform_name = "atmel-pcm-audio", - .codec_name = "wm8510-codec.0-0x1a", - .codec_dai_name = "wm8510-hifi", - .init = playpaq_wm8510_init, - .ops = &playpaq_wm8510_ops, -}; - - - -static struct snd_soc_card snd_soc_playpaq = { - .name = "LRS_PlayPaq_WM8510", - .dai_link = &playpaq_wm8510_dai, - .num_links = 1, -}; - -static struct platform_device *playpaq_snd_device; - - -static int __init playpaq_asoc_init(void) -{ - int ret = 0; - - /* - * Configure MCLK for WM8510 - */ - _gclk0 = clk_get(NULL, "gclk0"); - if (IS_ERR(_gclk0)) { - _gclk0 = NULL; - ret = PTR_ERR(_gclk0); - goto err_gclk0; - } - _pll0 = clk_get(NULL, "pll0"); - if (IS_ERR(_pll0)) { - _pll0 = NULL; - ret = PTR_ERR(_pll0); - goto err_pll0; - } - ret = clk_set_parent(_gclk0, _pll0); - if (ret) { - pr_warning("snd-soc-playpaq: " - "Failed to set PLL0 as parent for DAC clock\n"); - goto err_set_clk; - } - clk_set_rate(CODEC_CLK, 12000000); - clk_enable(CODEC_CLK); - -#if defined CONFIG_AT32_ENHANCED_PORTMUX - at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0); -#endif - - - /* - * Create and register platform device - */ - playpaq_snd_device = platform_device_alloc("soc-audio", 0); - if (playpaq_snd_device == NULL) { - ret = -ENOMEM; - goto err_device_alloc; - } - - platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq); - - ret = platform_device_add(playpaq_snd_device); - if (ret) { - pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n", - ret); - goto err_device_add; - } - - return 0; - - -err_device_add: - if (playpaq_snd_device != NULL) { - platform_device_put(playpaq_snd_device); - playpaq_snd_device = NULL; - } -err_device_alloc: -err_set_clk: - if (_pll0 != NULL) { - clk_put(_pll0); - _pll0 = NULL; - } -err_pll0: - if (_gclk0 != NULL) { - clk_put(_gclk0); - _gclk0 = NULL; - } - return ret; -} - - -static void __exit playpaq_asoc_exit(void) -{ - if (_gclk0 != NULL) { - clk_put(_gclk0); - _gclk0 = NULL; - } - if (_pll0 != NULL) { - clk_put(_pll0); - _pll0 = NULL; - } - -#if defined CONFIG_AT32_ENHANCED_PORTMUX - at32_free_pin(MCLK_PIN); -#endif - - platform_device_unregister(playpaq_snd_device); - playpaq_snd_device = NULL; -} - -module_init(playpaq_asoc_init); -module_exit(playpaq_asoc_exit); - -MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>"); -MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq"); -MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h index 444747f0db26..dd7be0dbbc58 100644 --- a/sound/soc/codecs/ad1836.h +++ b/sound/soc/codecs/ad1836.h @@ -34,7 +34,7 @@ #define AD1836_ADC_CTRL2 13 #define AD1836_ADC_WORD_LEN_MASK 0x30 -#define 
AD1836_ADC_WORD_OFFSET 5 +#define AD1836_ADC_WORD_OFFSET 4 #define AD1836_ADC_SERFMT_MASK (7 << 6) #define AD1836_ADC_SERFMT_PCK256 (0x4 << 6) #define AD1836_ADC_SERFMT_PCK128 (0x5 << 6) diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c index f1f237ecec2a..73f46eb459f1 100644 --- a/sound/soc/codecs/cs4270.c +++ b/sound/soc/codecs/cs4270.c @@ -601,7 +601,6 @@ static int cs4270_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg) static int cs4270_soc_resume(struct snd_soc_codec *codec) { struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec); - struct i2c_client *i2c_client = to_i2c_client(codec->dev); int reg; regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies), @@ -612,14 +611,7 @@ static int cs4270_soc_resume(struct snd_soc_codec *codec) ndelay(500); /* first restore the entire register cache ... */ - for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) { - u8 val = snd_soc_read(codec, reg); - - if (i2c_smbus_write_byte_data(i2c_client, reg, val)) { - dev_err(codec->dev, "i2c write failed\n"); - return -EIO; - } - } + snd_soc_cache_sync(codec); /* ... then disable the power-down bits */ reg = snd_soc_read(codec, CS4270_PWRCTL); diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c index 8c3c8205d19e..1ee66361f61b 100644 --- a/sound/soc/codecs/cs42l51.c +++ b/sound/soc/codecs/cs42l51.c @@ -555,7 +555,7 @@ static int cs42l51_probe(struct snd_soc_codec *codec) static struct snd_soc_codec_driver soc_codec_device_cs42l51 = { .probe = cs42l51_probe, - .reg_cache_size = CS42L51_NUMREGS, + .reg_cache_size = CS42L51_NUMREGS + 1, .reg_word_size = sizeof(u8), }; diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c index 9e7e964a5fa3..dcf6f2a1600a 100644 --- a/sound/soc/codecs/max9877.c +++ b/sound/soc/codecs/max9877.c @@ -106,13 +106,13 @@ static int max9877_set_2reg(struct snd_kcontrol *kcontrol, unsigned int mask = mc->max; unsigned int val = (ucontrol->value.integer.value[0] & mask); unsigned int val2 = (ucontrol->value.integer.value[1] & mask); - unsigned int change = 1; + unsigned int change = 0; - if (((max9877_regs[reg] >> shift) & mask) == val) - change = 0; + if (((max9877_regs[reg] >> shift) & mask) != val) + change = 1; - if (((max9877_regs[reg2] >> shift) & mask) == val2) - change = 0; + if (((max9877_regs[reg2] >> shift) & mask) != val2) + change = 1; if (change) { max9877_regs[reg] &= ~(mask << shift); diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c index c5ca8cfea60f..0441893e270e 100644 --- a/sound/soc/codecs/uda1380.c +++ b/sound/soc/codecs/uda1380.c @@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = { static int __init uda1380_modinit(void) { - int ret; + int ret = 0; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&uda1380_i2c_driver); if (ret != 0) pr_err("Failed to register UDA1380 I2C driver: %d\n", ret); #endif - return 0; + return ret; } module_init(uda1380_modinit); diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 9c982e47eb99..d0c545b73d78 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), }; static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = { -SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux, - adc_mux_ev, SND_SOC_DAPM_PRE_PMU), -SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux, - adc_mux_ev, SND_SOC_DAPM_PRE_PMU), 
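The max9877 hunk above fixes inverted change detection in max9877_set_2reg(): the old code started with change = 1 and cleared it if either register already matched, so a change was only reported when both values differed. The corrected logic, reduced to its essentials with invented names:

    /* A control that writes two registers has changed if either value differs,
     * not only when both do.
     */
    static int two_reg_changed(unsigned int cur1, unsigned int new1,
                               unsigned int cur2, unsigned int new2)
    {
        int change = 0;

        if (cur1 != new1)
            change = 1;
        if (cur2 != new2)
            change = 1;
        return change;
    }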
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux, + adc_mux_ev, SND_SOC_DAPM_PRE_PMU), +SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux, + adc_mux_ev, SND_SOC_DAPM_PRE_PMU), }; static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = { -SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), -SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), +SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), +SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), }; static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { @@ -2357,6 +2357,11 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT; lrclk = bclk_rate / params_rate(params); + if (!lrclk) { + dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n", + bclk_rate); + return -EINVAL; + } dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n", lrclk, bclk_rate / lrclk); @@ -3178,6 +3183,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) switch (wm8994->revision) { case 0: case 1: + case 2: + case 3: wm8994->hubs.dcs_codes_l = -9; wm8994->hubs.dcs_codes_r = -5; break; diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c index 31af405bda84..ae49f1c78c6d 100644 --- a/sound/soc/fsl/mpc8610_hpcd.c +++ b/sound/soc/fsl/mpc8610_hpcd.c @@ -392,7 +392,8 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev) } if (strcasecmp(sprop, "i2s-slave") == 0) { - machine_data->dai_format = SND_SOC_DAIFMT_I2S; + machine_data->dai_format = + SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM; machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT; machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN; @@ -409,31 +410,38 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev) } machine_data->clk_frequency = be32_to_cpup(iprop); } else if (strcasecmp(sprop, "i2s-master") == 0) { - machine_data->dai_format = SND_SOC_DAIFMT_I2S; + machine_data->dai_format = + SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS; machine_data->codec_clk_direction = SND_SOC_CLOCK_IN; machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT; } else if (strcasecmp(sprop, "lj-slave") == 0) { - machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J; + machine_data->dai_format = + SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM; machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT; machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN; } else if (strcasecmp(sprop, "lj-master") == 0) { - machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J; + machine_data->dai_format = + SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS; machine_data->codec_clk_direction = SND_SOC_CLOCK_IN; machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT; } else if (strcasecmp(sprop, "rj-slave") == 0) { - machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J; + machine_data->dai_format = + SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM; machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT; machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN; } else if (strcasecmp(sprop, "rj-master") == 0) { - machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J; + machine_data->dai_format = + SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS; machine_data->codec_clk_direction = SND_SOC_CLOCK_IN; machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT; } else if (strcasecmp(sprop, "ac97-slave") == 0) { - machine_data->dai_format = SND_SOC_DAIFMT_AC97; + machine_data->dai_format = + SND_SOC_DAIFMT_AC97 | 
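Among the wm8994 changes above, wm8994_hw_params() now fails cleanly with -EINVAL when the computed LRCLK divider would be zero (a BCLK lower than the sample rate), rather than dividing by zero further down. The guard in generic form; the function and parameter names are illustrative:

    #include <errno.h>

    /* Reject configurations where the bit clock is too slow to derive the
     * frame clock, instead of dividing by zero later.
     */
    static int derive_lrclk_div(unsigned int bclk_rate, unsigned int sample_rate,
                                unsigned int *lrclk_div)
    {
        unsigned int div = sample_rate ? bclk_rate / sample_rate : 0;

        if (!div)
            return -EINVAL;
        *lrclk_div = div;
        return 0;
    }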
SND_SOC_DAIFMT_CBM_CFM; machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT; machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN; } else if (strcasecmp(sprop, "ac97-master") == 0) { - machine_data->dai_format = SND_SOC_DAIFMT_AC97; + machine_data->dai_format = + SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS; machine_data->codec_clk_direction = SND_SOC_CLOCK_IN; machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT; } else { diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig index b133bfcc5848..738391757f2c 100644 --- a/sound/soc/imx/Kconfig +++ b/sound/soc/imx/Kconfig @@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1 config SND_SOC_MX27VIS_AIC32X4 tristate "SoC audio support for Visstrim M10 boards" - depends on MACH_IMX27_VISSTRIM_M10 + depends on MACH_IMX27_VISSTRIM_M10 && I2C select SND_SOC_TLV320AIC32X4 select SND_MXC_SOC_MX2 help diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig index 8f49e165f4d1..c62d715235e2 100644 --- a/sound/soc/kirkwood/Kconfig +++ b/sound/soc/kirkwood/Kconfig @@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S config SND_KIRKWOOD_SOC_OPENRD tristate "SoC Audio support for Kirkwood Openrd Client" depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE) + depends on I2C select SND_KIRKWOOD_SOC_I2S select SND_SOC_CS42L51 help @@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD config SND_KIRKWOOD_SOC_T5325 tristate "SoC Audio support for HP t5325" - depends on SND_KIRKWOOD_SOC && MACH_T5325 + depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C select SND_KIRKWOOD_SOC_I2S select SND_SOC_ALC5623 help diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig index ffd2242e305f..a0f7d3cfa470 100644 --- a/sound/soc/pxa/Kconfig +++ b/sound/soc/pxa/Kconfig @@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE config SND_SOC_RAUMFELD tristate "SoC Audio support Raumfeld audio adapter" depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR) + depends on I2C && SPI_MASTER select SND_PXA_SOC_SSP select SND_SOC_CS4270 select SND_SOC_AK4104 @@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD config SND_PXA2XX_SOC_HX4700 tristate "SoC Audio support for HP iPAQ hx4700" - depends on SND_PXA2XX_SOC && MACH_H4700 + depends on SND_PXA2XX_SOC && MACH_H4700 && I2C select SND_PXA2XX_SOC_I2S select SND_SOC_AK4641 help diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c index f75e43997d5b..ad9ac42522e2 100644 --- a/sound/soc/samsung/smdk_wm8994.c +++ b/sound/soc/samsung/smdk_wm8994.c @@ -9,6 +9,7 @@ #include "../codecs/wm8994.h" #include <sound/pcm_params.h> +#include <linux/module.h> /* * Default CFG switch settings to use this driver: diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c index 85bf541a771d..4b8e35410eb1 100644 --- a/sound/soc/samsung/speyside.c +++ b/sound/soc/samsung/speyside.c @@ -191,7 +191,7 @@ static int speyside_late_probe(struct snd_soc_card *card) snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic"); snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC"); snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC"); - snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker"); + snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker"); snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output"); snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input"); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index a5d3685a5d38..a25fa63ce9a2 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -709,6 +709,12 @@ int snd_soc_resume(struct device *dev) struct snd_soc_card *card = 
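The soc-core.c hunk above makes snd_soc_resume() bail out when the card has no codecs bound, which happens when initialisation failed earlier; resuming such a half-constructed card would touch missing state. The shape of that guard, assuming a simplified card structure that is not the real struct snd_soc_card:

    #include <linux/list.h>

    struct card {
        struct list_head codec_dev_list;
    };

    static int card_resume(struct card *card)
    {
        /* Probe never completed, so there are no codecs to resume. */
        if (list_empty(&card->codec_dev_list))
            return 0;

        /* ... normal resume sequence would follow ... */
        return 0;
    }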
dev_get_drvdata(dev); int i, ac97_control = 0; + /* If the initialization of this soc device failed, there is no codec + * associated with it. Just bail out in this case. + */ + if (list_empty(&card->codec_dev_list)) + return 0; + /* AC97 devices might have other drivers hanging off them so * need to resume immediately. Other drivers don't have that * problem and may take a substantial amount of time to resume diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c index 0c12b98484bd..4220bb0f2730 100644 --- a/sound/soc/soc-utils.c +++ b/sound/soc/soc-utils.c @@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params) } EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk); -static struct snd_soc_platform_driver dummy_platform; +static const struct snd_pcm_hardware dummy_dma_hardware = { + .formats = 0xffffffff, + .channels_min = 1, + .channels_max = UINT_MAX, + + /* Random values to keep userspace happy when checking constraints */ + .info = SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_BLOCK_TRANSFER, + .buffer_bytes_max = 128*1024, + .period_bytes_min = PAGE_SIZE, + .period_bytes_max = PAGE_SIZE*2, + .periods_min = 2, + .periods_max = 128, +}; + +static int dummy_dma_open(struct snd_pcm_substream *substream) +{ + snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware); + + return 0; +} + +static struct snd_pcm_ops dummy_dma_ops = { + .open = dummy_dma_open, + .ioctl = snd_pcm_lib_ioctl, +}; + +static struct snd_soc_platform_driver dummy_platform = { + .ops = &dummy_dma_ops, +}; static __devinit int snd_soc_dummy_probe(struct platform_device *pdev) { diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index b61945f3af9e..32d2a21f2e3b 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -1633,6 +1633,37 @@ YAMAHA_DEVICE(0x7010, "UB99"), } }, { + /* Roland GAIA SH-01 */ + USB_DEVICE(0x0582, 0x0111), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "Roland", + .product_name = "GAIA", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 2, + .type = QUIRK_MIDI_FIXED_ENDPOINT, + .data = &(const struct snd_usb_midi_endpoint_info) { + .out_cables = 0x0003, + .in_cables = 0x0003 + } + }, + { + .ifnum = -1 + } + } + } +}, +{ USB_DEVICE(0x0582, 0x0113), .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { /* .vendor_name = "BOSS", */ diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 7d98676808d8..955930e0a5c3 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv) list_for_each_entry(counter, &evsel_list->entries, node) { if (create_perf_stat_counter(counter, first) < 0) { - if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) { + if (errno == EINVAL || errno == ENOSYS || + errno == ENOENT || errno == EOPNOTSUPP) { if (verbose) ui__warning("%s event is not supported by the kernel.\n", event_name(counter)); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index e42626422587..d7915d4e77cb 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -34,6 +34,16 @@ int __perf_evsel__sample_size(u64 sample_type) return size; } +static void hists__init(struct hists *hists) +{ + memset(hists, 0, sizeof(*hists)); + hists->entries_in_array[0] = 
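The builtin-stat.c change above extends the "event not supported" check with EOPNOTSUPP, so such counters are skipped with a warning instead of aborting the run. Pulled out into a predicate for clarity; the helper name is invented:

    #include <errno.h>

    /* A failed counter open with one of these errno values means the event is
     * simply unsupported on this kernel/CPU and should be skipped with a
     * warning, not treated as a fatal error. EOPNOTSUPP joins the set.
     */
    static int open_error_is_unsupported(int err)
    {
        return err == EINVAL || err == ENOSYS ||
               err == ENOENT || err == EOPNOTSUPP;
    }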
hists->entries_in_array[1] = RB_ROOT; + hists->entries_in = &hists->entries_in_array[0]; + hists->entries_collapsed = RB_ROOT; + hists->entries = RB_ROOT; + pthread_mutex_init(&hists->lock, NULL); +} + void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, int idx) { diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index bcd05d05b4f0..33c17a2b2a81 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -388,7 +388,7 @@ static int write_event_desc(int fd, struct perf_header *h __used, /* * write event string as passed on cmdline */ - ret = do_write_string(fd, attr->name); + ret = do_write_string(fd, event_name(attr)); if (ret < 0) return ret; /* diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index a36a3fa81ffb..abef2703cd24 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -1211,13 +1211,3 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) return ret; } - -void hists__init(struct hists *hists) -{ - memset(hists, 0, sizeof(*hists)); - hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT; - hists->entries_in = &hists->entries_in_array[0]; - hists->entries_collapsed = RB_ROOT; - hists->entries = RB_ROOT; - pthread_mutex_init(&hists->lock, NULL); -} diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index c86c1d27bd1e..89289c8e935e 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -63,8 +63,6 @@ struct hists { struct callchain_cursor callchain_cursor; }; -void hists__init(struct hists *hists); - struct hist_entry *__hists__add_entry(struct hists *self, struct addr_location *al, struct symbol *parent, u64 period); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 85c1e6b76f0a..0f4555ce9063 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1333,6 +1333,10 @@ int perf_session__cpu_bitmap(struct perf_session *session, } map = cpu_map__new(cpu_list); + if (map == NULL) { + pr_err("Invalid cpu_list\n"); + return -1; + } for (i = 0; i < map->nr; i++) { int cpu = map->map[i]; diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 0a7ed5b5e281..6c164dc9ee95 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok) field = malloc_or_die(sizeof(*field)); type = process_arg(event, field, &token); + while (type == EVENT_OP) + type = process_op(event, field, &token); if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; |