| field | value | date |
|---|---|---|
| author | Ingo Molnar <mingo@kernel.org> | 2012-09-13 17:18:38 +0200 |
| committer | Ingo Molnar <mingo@kernel.org> | 2012-09-13 17:18:38 +0200 |
| commit | 4553f0b90e1fd93b95a97918804528d4039201fc | |
| tree | e17524a262d3e0708479ad35edb706c3e1af5415 | |
| parent | be267be8b191d5fac9f65a29e047470f364315eb | |
| parent | 7ece55a4a3a04abe37118b1d4fb0b702eeb1de4c | |
Merge branch 'core/rcu' into perf/core
Steve Rostedt asked for a single commit to be merged into both
the RCU and the perf/tracing trees:
| Josh made a change to the tracing code that affects both the
| work Paul McKenney and I are currently doing. At the last
| Kernel Summit back in August, Linus said when such a case
| exists, it is best to make a separate branch based off of his
| tree and place the change there. This way, the repositories
| that need to share the change can both pull them in and the
| SHA1 will match for both. Whichever branch is pulled in first
| by Linus will also pull in the necessary change for the other
| branch as well.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
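
The workflow described above (a shared topic branch based directly on Linus'
tree, merged by every tree that needs the commit so its SHA1 stays identical)
can be sketched with ordinary git commands. This is only an illustrative
sketch; the remote and branch names below ("linus", "shared/tracing-fix",
"rcu/dev", "perf/core-wip") are made up, not the ones actually used:

```
# Base a topic branch directly on Linus' tree and apply the single
# commit there.
git fetch linus
git checkout -b shared/tracing-fix linus/master
git am 0001-tracing-change.patch

# Each maintainer tree then merges that same branch, so the commit keeps
# an identical SHA1 in both trees, and whichever tree Linus pulls first
# also brings the change in for the other.
git checkout rcu/dev && git merge shared/tracing-fix
git checkout perf/core-wip && git merge shared/tracing-fix
```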
210 files changed, 1568 insertions(+), 966 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci index 34f51100f029..dff1f48d252d 100644 --- a/Documentation/ABI/testing/sysfs-bus-pci +++ b/Documentation/ABI/testing/sysfs-bus-pci @@ -210,3 +210,15 @@ Users: firmware assigned instance number of the PCI device that can help in understanding the firmware intended order of the PCI device. + +What: /sys/bus/pci/devices/.../d3cold_allowed +Date: July 2012 +Contact: Huang Ying <ying.huang@intel.com> +Description: + d3cold_allowed is bit to control whether the corresponding PCI + device can be put into D3Cold state. If it is cleared, the + device will never be put into D3Cold state. If it is set, the + device may be put into D3Cold state if other requirements are + satisfied too. Reading this attribute will show the current + value of d3cold_allowed bit. Writing this attribute will set + the value of d3cold_allowed bit. diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index afaff312bf41..f4d8c7105fcd 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -579,7 +579,7 @@ Why: KVM tracepoints provide mostly equivalent information in a much more ---------------------------- What: at91-mci driver ("CONFIG_MMC_AT91") -When: 3.7 +When: 3.8 Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support was added to atmel-mci as a first step to support more chips. Then at91-mci was kept only for old IP versions (on at91rm9200 and diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c index 73ff5cc93e05..3da822967ee0 100644 --- a/Documentation/watchdog/src/watchdog-test.c +++ b/Documentation/watchdog/src/watchdog-test.c @@ -31,7 +31,7 @@ static void keep_alive(void) * or "-e" to enable the card. 
*/ -void term(int sig) +static void term(int sig) { close(fd); fprintf(stderr, "Stopping watchdog ticks...\n"); @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc5 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c5f9ae5dbd1a..2f88d8d97701 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -6,7 +6,7 @@ config ARM select HAVE_DMA_API_DEBUG select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_DMA_ATTRS - select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) + select HAVE_DMA_CONTIGUOUS if MMU select HAVE_MEMBLOCK select RTC_LIB select SYS_SUPPORTS_APM_EMULATION diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts index 7829a4d0cb22..96514c134e54 100644 --- a/arch/arm/boot/dts/at91sam9g25ek.dts +++ b/arch/arm/boot/dts/at91sam9g25ek.dts @@ -15,7 +15,7 @@ compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; chosen { - bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; }; ahb { diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig index 7d8718468e0d..90610c7030f7 100644 --- a/arch/arm/configs/armadillo800eva_defconfig +++ b/arch/arm/configs/armadillo800eva_defconfig @@ -33,7 +33,7 @@ CONFIG_AEABI=y CONFIG_FORCE_MAX_ZONEORDER=13 CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096" +CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw" CONFIG_CMDLINE_FORCE=y CONFIG_KEXEC=y CONFIG_VFP=y diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 2ae842df4551..5c44dcb0987b 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -203,6 +203,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size, } /* + * This can be called during early boot to increase the size of the atomic + * coherent DMA pool above the default value of 256KiB. It must be called + * before postcore_initcall. + */ +extern void __init init_dma_coherent_pool_size(unsigned long size); + +/* * This can be called during boot to increase the size of the consistent * DMA region above it's default value of 2MB. It must be called before the * memory allocator is initialised, i.e. before any core_initcall. 
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index 104ca40d8d18..aaa443b48c91 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void) at91_st_read(AT91_ST_SR); /* Make IRQs happen for the system timer */ - setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq); + setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq); /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used * directly for the clocksource and all clockevents, after adjusting diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index 7b9c2ba396ed..bce572a530ef 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -726,6 +726,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, }, }; @@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9260_rtt_device.num_resources = 2; + at91sam9260_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9260_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 8df5c1bdff92..bc2590d712d0 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -609,6 +609,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9261_rtt_device.num_resources = 2; + at91sam9261_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9261_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index eb6bbf86fb9f..9b6ca734f1a9 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c @@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed only for the chosen RTT: * GPBR will serve as the storage for RTC time offset */ - at91sam9263_rtt0_device.num_resources = 2; + at91sam9263_rtt0_device.num_resources = 3; at91sam9263_rtt1_device.num_resources = 1; pdev = &at91sam9263_rtt0_device; r = rtt0_resources; break; case 1: at91sam9263_rtt0_device.num_resources = 1; - at91sam9263_rtt1_device.num_resources = 2; + at91sam9263_rtt1_device.num_resources = 3; pdev = 
&at91sam9263_rtt1_device; r = rtt1_resources; break; @@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void) pdev->name = "rtc-at91sam9"; r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; r[1].end = r[1].start + 3; + r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 06073996a382..1b47319ca00b 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9g45_rtt_device.num_resources = 2; + at91sam9g45_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9G45_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index f09fff932172..b3d365dadef5 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c @@ -688,6 +688,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9rl_rtt_device.num_resources = 2; + at91sam9rl_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9RL_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c index de2ec6b8fea7..188c82971ebd 100644 --- a/arch/arm/mach-at91/clock.c +++ b/arch/arm/mach-at91/clock.c @@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base); #define cpu_has_300M_plla() (cpu_is_at91sam9g10()) +#define cpu_has_240M_plla() (cpu_is_at91sam9261() \ + || cpu_is_at91sam9263() \ + || cpu_is_at91sam9rl()) + +#define cpu_has_210M_plla() (cpu_is_at91sam9260()) + #define cpu_has_pllb() (!(cpu_is_at91sam9rl() \ || cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ @@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock) } else if (cpu_has_800M_plla()) { if (plla.rate_hz > 800000000) pll_overclock = true; + } else if (cpu_has_240M_plla()) { + if (plla.rate_hz > 240000000) + pll_overclock = true; + } else if (cpu_has_210M_plla()) { + if (plla.rate_hz > 210000000) + pll_overclock = true; } else { if (plla.rate_hz > 209000000) pll_overclock = true; diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c index ca70e5fcc7ac..020852d3bdd8 100644 --- a/arch/arm/mach-gemini/irq.c +++ b/arch/arm/mach-gemini/irq.c @@ -17,6 +17,7 @@ #include <linux/sched.h> #include <asm/irq.h> #include <asm/mach/irq.h> +#include <asm/system_misc.h> #include <mach/hardware.h> #define 
IRQ_SOURCE(base_addr) (base_addr + 0x00) diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index 3226077735b1..1201191d7f1b 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void) void __init kirkwood_init_early(void) { orion_time_set_base(TIMER_VIRT_BASE); + + /* + * Some Kirkwood devices allocate their coherent buffers from atomic + * context. Increase size of atomic coherent pool to make sure such + * the allocations won't fail. + */ + init_dma_coherent_pool_size(SZ_1M); } int kirkwood_tclk; diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c index d93359379598..be90b7d0e10b 100644 --- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c +++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/init.h> +#include <linux/sizes.h> #include <linux/platform_device.h> #include <linux/mtd/partitions.h> #include <linux/ata_platform.h> diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index cf10f92856dc..453a6e50db8b 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = { }; /* GPIO KEY */ -#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } +#define GPIO_KEY(c, g, d, ...) \ + { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ } static struct gpio_keys_button gpio_buttons[] = { - GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"), - GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"), - GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"), - GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"), + GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1), + GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"), + GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"), + GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"), }; static struct gpio_keys_platform_data gpio_key_info = { @@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = { &camera_device, &ceu0_device, &fsi_device, - &fsi_hdmi_device, &fsi_wm8978_device, + &fsi_hdmi_device, }; static void __init eva_clock_init(void) diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 7ea2b31e3199..c129542f6aed 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = { * - J30 "open" * - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET * - add .get_vbus = usbhs_get_vbus in usbhs1_private + * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices. 
*/ #define IRQ8 evt2irq(0x0300) #define USB_PHY_MODE (1 << 4) @@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = { &nor_flash_device, &smc911x_device, &lcdc_device, - &usbhs1_device, &usbhs0_device, + &usbhs1_device, &leds_device, &fsi_device, &fsi_ak4643_device, diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c index 3a528cf4366c..fcf5a47f4772 100644 --- a/arch/arm/mach-shmobile/board-marzen.c +++ b/arch/arm/mach-shmobile/board-marzen.c @@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = { static struct platform_device eth_device = { .name = "smsc911x", - .id = 0, + .id = -1, .dev = { .platform_data = &smsc911x_platdata, }, diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c index ee447404c857..588555a67d9c 100644 --- a/arch/arm/mach-shmobile/intc-sh73a0.c +++ b/arch/arm/mach-shmobile/intc-sh73a0.c @@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on) return 0; /* always allow wakeup */ } -#define RELOC_BASE 0x1000 +#define RELOC_BASE 0x1200 -/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */ +/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */ #define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE) INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000, diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 4e7d1182e8a3..051204fc4617 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size) vunmap(cpu_addr); } +#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K + struct dma_pool { size_t size; spinlock_t lock; unsigned long *bitmap; unsigned long nr_pages; void *vaddr; - struct page *page; + struct page **pages; }; static struct dma_pool atomic_pool = { - .size = SZ_256K, + .size = DEFAULT_DMA_COHERENT_POOL_SIZE, }; static int __init early_coherent_pool(char *p) @@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p) } early_param("coherent_pool", early_coherent_pool); +void __init init_dma_coherent_pool_size(unsigned long size) +{ + /* + * Catch any attempt to set the pool size too late. + */ + BUG_ON(atomic_pool.vaddr); + + /* + * Set architecture specific coherent pool size only if + * it has not been changed by kernel command line parameter. + */ + if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) + atomic_pool.size = size; +} + /* * Initialise the coherent pool for atomic allocations. 
*/ @@ -297,6 +314,7 @@ static int __init atomic_pool_init(void) unsigned long nr_pages = pool->size >> PAGE_SHIFT; unsigned long *bitmap; struct page *page; + struct page **pages; void *ptr; int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); @@ -304,21 +322,31 @@ static int __init atomic_pool_init(void) if (!bitmap) goto no_bitmap; + pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); + if (!pages) + goto no_pages; + if (IS_ENABLED(CONFIG_CMA)) ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page); else ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, &page, NULL); if (ptr) { + int i; + + for (i = 0; i < nr_pages; i++) + pages[i] = page + i; + spin_lock_init(&pool->lock); pool->vaddr = ptr; - pool->page = page; + pool->pages = pages; pool->bitmap = bitmap; pool->nr_pages = nr_pages; pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", (unsigned)pool->size / 1024); return 0; } +no_pages: kfree(bitmap); no_bitmap: pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", @@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) if (pageno < pool->nr_pages) { bitmap_set(pool->bitmap, pageno, count); ptr = pool->vaddr + PAGE_SIZE * pageno; - *ret_page = pool->page + pageno; + *ret_page = pool->pages[pageno]; + } else { + pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n" + "Please increase it with coherent_pool= kernel parameter!\n", + (unsigned)pool->size / 1024); } spin_unlock_irqrestore(&pool->lock, flags); return ptr; } +static bool __in_atomic_pool(void *start, size_t size) +{ + struct dma_pool *pool = &atomic_pool; + void *end = start + size; + void *pool_start = pool->vaddr; + void *pool_end = pool->vaddr + pool->size; + + if (start < pool_start || start > pool_end) + return false; + + if (end <= pool_end) + return true; + + WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n", + start, end - 1, pool_start, pool_end - 1); + + return false; +} + static int __free_from_pool(void *start, size_t size) { struct dma_pool *pool = &atomic_pool; unsigned long pageno, count; unsigned long flags; - if (start < pool->vaddr || start > pool->vaddr + pool->size) + if (!__in_atomic_pool(start, size)) return 0; - if (start + size > pool->vaddr + pool->size) { - WARN(1, "freeing wrong coherent size from pool\n"); - return 0; - } - pageno = (start - pool->vaddr) >> PAGE_SHIFT; count = size >> PAGE_SHIFT; @@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si return 0; } +static struct page **__atomic_get_pages(void *addr) +{ + struct dma_pool *pool = &atomic_pool; + struct page **pages = pool->pages; + int offs = (addr - pool->vaddr) >> PAGE_SHIFT; + + return pages + offs; +} + static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) { struct vm_struct *area; + if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) + return __atomic_get_pages(cpu_addr); + if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) return cpu_addr; @@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) return NULL; } +static void *__iommu_alloc_atomic(struct device *dev, size_t size, + dma_addr_t *handle) +{ + struct page *page; + void *addr; + + addr = __alloc_from_pool(size, &page); + if (!addr) + return NULL; + + *handle = __iommu_create_mapping(dev, &page, size); + if (*handle == DMA_ERROR_CODE) + goto err_mapping; + + return addr; + +err_mapping: + __free_from_pool(addr, size); 
+ return NULL; +} + +static void __iommu_free_atomic(struct device *dev, struct page **pages, + dma_addr_t handle, size_t size) +{ + __iommu_remove_mapping(dev, handle, size); + __free_from_pool(page_address(pages[0]), size); +} + static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { @@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, *handle = DMA_ERROR_CODE; size = PAGE_ALIGN(size); + if (gfp & GFP_ATOMIC) + return __iommu_alloc_atomic(dev, size, handle); + pages = __iommu_alloc_buffer(dev, size, gfp); if (!pages) return NULL; @@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, return; } + if (__in_atomic_pool(cpu_addr, size)) { + __iommu_free_atomic(dev, pages, handle, size); + return; + } + if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { unmap_kernel_range((unsigned long)cpu_addr, size); vunmap(cpu_addr); diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 6c6defc24619..af9cf30ed474 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) -#define ATOMIC_INIT(i) ((atomic_t) { (i) }) +#define ATOMIC_INIT(i) { (i) } #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() @@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) +#define ATOMIC64_INIT(i) { (i) } static __inline__ s64 __atomic64_add_return(s64 i, atomic64_t *v) diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index d4b94b395c16..2c05a9292a81 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, cregs->ksp = (unsigned long)stack + (pregs->gr[21] & (THREAD_SIZE - 1)); cregs->gr[30] = usp; - if (p->personality == PER_HPUX) { + if (personality(p->personality) == PER_HPUX) { #ifdef CONFIG_HPUX cregs->kpc = (unsigned long) &hpux_child_return; #else diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index c9b932260f47..7426e40699bd 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality) long err; if (personality(current->personality) == PER_LINUX32 - && personality == PER_LINUX) - personality = PER_LINUX32; + && personality(personality) == PER_LINUX) + personality = (personality & ~PER_MASK) | PER_LINUX32; err = sys_personality(personality); - if (err == PER_LINUX32) - err = PER_LINUX; + if (personality(err) == PER_LINUX32) + err = (err & ~PER_MASK) | PER_LINUX; return err; } diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 53b6dfa83344..54b73a28c205 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable; enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; extern int powersave_nap; /* set if nap mode can be used in idle loop */ +extern void power7_nap(void); #ifdef CONFIG_PSERIES_IDLE extern void update_smt_snooze_delay(int snooze); diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 
85b05c463fae..e8995727b1c1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -76,6 +76,7 @@ int main(void) DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr)); + DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit)); #else DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index 5b25c8060fd6..a892680668d8 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c @@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void) void doorbell_cause_ipi(int cpu, unsigned long data) { + /* Order previous accesses vs. msgsnd, which is treated as a store */ + mb(); ppc_msgsnd(PPC_DBELL, 0, data); } diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 4b01a25e29ef..b40e0b4815b3 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork) li r3,0 b syscall_exit + .section ".toc","aw" +DSCR_DEFAULT: + .tc dscr_default[TC],dscr_default + + .section ".text" + /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. Then the state @@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) mr r1,r8 /* start using new stack pointer */ std r7,PACAKSAVE(r13) - ld r6,_CCR(r1) - mtcrf 0xFF,r6 - #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION ld r0,THREAD_VRSAVE(r4) @@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION + lwz r6,THREAD_DSCR_INHERIT(r4) + ld r7,DSCR_DEFAULT@toc(2) ld r0,THREAD_DSCR(r4) - cmpd r0,r25 - beq 1f + cmpwi r6,0 + bne 1f + ld r0,0(r7) +1: cmpd r0,r25 + beq 2f mtspr SPRN_DSCR,r0 -1: +2: END_FTR_SECTION_IFSET(CPU_FTR_DSCR) #endif + ld r6,_CCR(r1) + mtcrf 0xFF,r6 + /* r3-r13 are destroyed -- Cort */ REST_8GPRS(14, r1) REST_10GPRS(22, r1) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index e894515e77bb..39aa97d3ff88 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -186,7 +186,7 @@ hardware_interrupt_hv: KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800) MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer) - MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer) + STD_EXCEPTION_HV(0x980, 0x982, hdecrementer) STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00) @@ -486,6 +486,7 @@ machine_check_common: STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) + STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index 7140d838339e..e11863f4e595 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -28,7 +28,9 @@ _GLOBAL(power7_idle) lwz r4,ADDROFF(powersave_nap)(r3) cmpwi 0,r4,0 beqlr + /* fall through */ +_GLOBAL(power7_nap) /* NAP is a state loss, we create a regs frame on the * stack, fill it up with the state we care about and * stick a pointer to it in PACAR1. 
We really only diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 710f400476de..1a1f2ddfb581 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, #endif /* CONFIG_PPC_STD_MMU_64 */ #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_DSCR)) { - if (current->thread.dscr_inherit) { - p->thread.dscr_inherit = 1; - p->thread.dscr = current->thread.dscr; - } else if (0 != dscr_default) { - p->thread.dscr_inherit = 1; - p->thread.dscr = dscr_default; - } else { - p->thread.dscr_inherit = 0; - p->thread.dscr = 0; - } + p->thread.dscr_inherit = current->thread.dscr_inherit; + p->thread.dscr = current->thread.dscr; } #endif diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 0321007086f7..8d4214afc21d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg) struct cpu_messages *info = &per_cpu(ipi_message, cpu); char *message = (char *)&info->messages; + /* + * Order previous accesses before accesses in the IPI handler. + */ + smp_mb(); message[msg] = 1; - mb(); + /* + * cause_ipi functions are required to include a full barrier + * before doing whatever causes the IPI. + */ smp_ops->cause_ipi(cpu, info->data); } @@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void) mb(); /* order any irq clear */ do { - all = xchg_local(&info->messages, 0); + all = xchg(&info->messages, 0); #ifdef __BIG_ENDIAN if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION))) diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 3529446c2abd..8302af649219 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev, return sprintf(buf, "%lx\n", dscr_default); } +static void update_dscr(void *dummy) +{ + if (!current->thread.dscr_inherit) { + current->thread.dscr = dscr_default; + mtspr(SPRN_DSCR, dscr_default); + } +} + static ssize_t __used store_dscr_default(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev, return -EINVAL; dscr_default = val; + on_each_cpu(update_dscr, NULL, 1); + return count; } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index be171ee73bf8..e49e93191b69 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs) trace_timer_interrupt_exit(regs); } +/* + * Hypervisor decrementer interrupts shouldn't occur but are sometimes + * left pending on exit from a KVM guest. We don't need to do anything + * to clear them, as they are edge-triggered. 
+ */ +void hdec_interrupt(struct pt_regs *regs) +{ +} + #ifdef CONFIG_SUSPEND static void generic_suspend_disable_irqs(void) { diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 158972341a2d..ae0843fa7a61 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs) cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mtdscr, regs); rd = (instword >> 21) & 0x1f; - mtspr(SPRN_DSCR, regs->gpr[rd]); + current->thread.dscr = regs->gpr[rd]; current->thread.dscr_inherit = 1; + mtspr(SPRN_DSCR, current->thread.dscr); return 0; } #endif diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index dd223b3eb333..17e5b2364312 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr) { int err; - err = __put_user(instr, addr); + __put_user_size(instr, addr, 4, err); if (err) return err; asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr)); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 39b159751c35..59213cfaeca9 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu, /* * Update the node maps and sysfs entries for each cpu whose home node - * has changed. + * has changed. Returns 1 when the topology has changed, and 0 otherwise. */ int arch_update_cpu_topology(void) { - int cpu, nid, old_nid; + int cpu, nid, old_nid, changed = 0; unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; struct device *dev; @@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void) dev = get_cpu_device(cpu); if (dev) kobject_uevent(&dev->kobj, KOBJ_CHANGE); + changed = 1; } - return 1; + return changed; } static void topology_work_fn(struct work_struct *work) diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 3ef46254c35b..7698b6e13c57 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void) { unsigned int cpu; - /* If powersave_nap is enabled, use NAP mode, else just - * spin aimlessly - */ - if (!powersave_nap) { - generic_mach_cpu_die(); - return; - } - /* Standard hot unplug procedure */ local_irq_disable(); idle_task_exit(); @@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void) */ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); while (!generic_check_cpu_restart(cpu)) { - power7_idle(); + power7_nap(); if (!generic_check_cpu_restart(cpu)) { DBG("CPU%d Unexpected exit while offline !\n", cpu); /* We may be getting an IPI, so we re-enable diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c index 14469cf9df68..df0fc5821469 100644 --- a/arch/powerpc/sysdev/xics/icp-hv.c +++ b/arch/powerpc/sysdev/xics/icp-hv.c @@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value) static inline void icp_hv_set_qirr(int n_cpu , u8 value) { int hw_cpu = get_hard_smp_processor_id(n_cpu); - long rc = plpar_hcall_norets(H_IPI, hw_cpu, value); + long rc; + + /* Make sure all previous accesses are ordered before IPI sending */ + mb(); + rc = plpar_hcall_norets(H_IPI, hw_cpu, value); if (rc != H_SUCCESS) { pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x " "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc); diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h 
index 32e8449640fa..9b94a160fe7f 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -180,7 +180,8 @@ extern char elf_platform[]; #define ELF_PLATFORM (elf_platform) #ifndef CONFIG_64BIT -#define SET_PERSONALITY(ex) set_personality(PER_LINUX) +#define SET_PERSONALITY(ex) \ + set_personality(PER_LINUX | (current->personality & (~PER_MASK))) #else /* CONFIG_64BIT */ #define SET_PERSONALITY(ex) \ do { \ diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h index 7bcc14e395f0..bf2a2ad2f800 100644 --- a/arch/s390/include/asm/posix_types.h +++ b/arch/s390/include/asm/posix_types.h @@ -13,6 +13,7 @@ */ typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; #define __kernel_size_t __kernel_size_t typedef unsigned short __kernel_old_dev_t; @@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t; typedef unsigned short __kernel_ipc_pid_t; typedef unsigned short __kernel_uid_t; typedef unsigned short __kernel_gid_t; -typedef int __kernel_ssize_t; typedef int __kernel_ptrdiff_t; #else /* __s390x__ */ @@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t; typedef int __kernel_ipc_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; -typedef long __kernel_ssize_t; typedef long __kernel_ptrdiff_t; typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index a0a8340daafa..ce26ac3cb162 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data) } static inline int smp_find_processor_id(int address) { return 0; } +static inline int smp_store_status(int cpu) { return 0; } static inline int smp_vcpu_scheduled(int cpu) { return 1; } static inline void smp_yield_cpu(int cpu) { } static inline void smp_yield(void) { } diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c index f60238559af3..0748fe0c8a73 100644 --- a/arch/um/os-Linux/time.c +++ b/arch/um/os-Linux/time.c @@ -114,7 +114,7 @@ static void deliver_alarm(void) skew += this_tick - last_tick; while (skew >= one_tick) { - alarm_handler(SIGVTALRM, NULL); + alarm_handler(SIGVTALRM, NULL, NULL); skew -= one_tick; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index dce75b760312..148ed666e311 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2000,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) case MSR_KVM_STEAL_TIME: data = vcpu->arch.st.msr_val; break; + case MSR_KVM_PV_EOI_EN: + data = vcpu->arch.pv_eoi.msr_val; + break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index b65a76133f4f..5141d808e751 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; - if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { + if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { args->op.cmd = MMUEXT_INVLPG_MULTI; args->op.arg1.linear_addr = start; } diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index d4b255463253..76ba0e97e530 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_ if (p2m_index(set_pfn)) return false; - for (pfn = 0; pfn <= 
MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { + for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { topidx = p2m_top_index(pfn); if (!p2m_top[topidx]) diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 78efb0306a44..34d94c762a1e 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size, return -EINVAL; /* Sanitise input arguments */ - alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order); + alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); base = ALIGN(base, alignment); size = ALIGN(size, alignment); limit &= ~(alignment - 1); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b16c8a72a2e2..ba7926f5c099 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -294,7 +294,7 @@ config GPIO_MAX732X_IRQ config GPIO_MC9S08DZ60 bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions" - depends on I2C && MACH_MX35_3DS + depends on I2C=y && MACH_MX35_3DS help Select this to enable the MC9S08DZ60 GPIO driver diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index ae37181798b3..ec48ed512628 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p) p->irq_base = irq_alloc_descs(pdata->irq_base, 0, pdata->number_of_pins, numa_node_id()); - if (IS_ERR_VALUE(p->irq_base)) { + if (p->irq_base < 0) { dev_err(&pdev->dev, "cannot get irq_desc\n"); - return -ENXIO; + return p->irq_base; } pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n", pdata->gpio_base, pdata->number_of_pins, p->irq_base); diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c index e97016af6443..b62d443e9a59 100644 --- a/drivers/gpio/gpio-rdc321x.c +++ b/drivers/gpio/gpio-rdc321x.c @@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; + rdc321x_gpio_dev->chip.owner = THIS_MODULE; rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a18c4aa68b1e..f1a45997aea8 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -82,7 +82,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname, gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); of_node_put(gg_data.gpiospec.np); - pr_debug("%s exited with status %d\n", __func__, ret); + pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio); return gg_data.out_gpio; } EXPORT_SYMBOL(of_get_named_gpio_flags); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 08a7aa722d6b..6fbfc244748f 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - if (!req->flags) + if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a8743c399e83..b7ee230572b7 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -87,6 +87,9 @@ static struct edid_quirk { int product_id; u32 quirks; } edid_quirk_list[] = { + /* ASUS VW222S */ + { 
"ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING }, + /* Acer AL1706 */ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, /* Acer F51 */ diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 30dc22a7156c..8033526bb53b 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, (struct drm_connector **) (psb_intel_crtc + 1); psb_intel_crtc->mode_set.num_connectors = 0; psb_intel_cursor_init(dev, psb_intel_crtc); + + /* Set to true so that the pipe is forced off on initial config. */ + psb_intel_crtc->active = true; } int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d9a5372ec56f..60815b861ec2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 * entries. For aliasing ppgtt support we just steal them at the end for * now. */ - first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES; + first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES; ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); if (!ppgtt) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a69a3d0d3acf..2dfa6cf4886b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1384,7 +1384,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - WARN(hdmi_pipe_enabled(dev_priv, val, pipe), + WARN(hdmi_pipe_enabled(dev_priv, pipe, val), "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); @@ -1404,13 +1404,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - WARN(adpa_pipe_enabled(dev_priv, val, pipe), + WARN(adpa_pipe_enabled(dev_priv, pipe, val), "PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); reg = PCH_LVDS; val = I915_READ(reg); - WARN(lvds_pipe_enabled(dev_priv, val, pipe), + WARN(lvds_pipe_enabled(dev_priv, pipe, val), "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); @@ -1872,7 +1872,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - if (hdmi_pipe_enabled(dev_priv, val, pipe)) { + if (hdmi_pipe_enabled(dev_priv, pipe, val)) { DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~PORT_ENABLE); @@ -1894,12 +1894,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - if (adpa_pipe_enabled(dev_priv, val, pipe)) + if (adpa_pipe_enabled(dev_priv, pipe, val)) I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); reg = PCH_LVDS; val = I915_READ(reg); - if (lvds_pipe_enabled(dev_priv, val, pipe)) { + if (lvds_pipe_enabled(dev_priv, pipe, val)) { DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); I915_WRITE(reg, val & ~LVDS_PORT_EN); POSTING_READ(reg); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e05c0d3e3440..e9a6f6aaed85 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -780,6 
+780,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Gigabyte GA-D525TUD", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), + }, + }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index cc8df4de2d92..7644f31a3778 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: - sprctl |= SPRITE_FORMAT_RGBX888; + sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; pixel_size = 4; break; case DRM_FORMAT_XRGB8888: - sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; + sprctl |= SPRITE_FORMAT_RGBX888; pixel_size = 4; break; case DRM_FORMAT_YUYV: diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 1866dbb49979..c61014442aa9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -736,9 +736,11 @@ nouveau_card_init(struct drm_device *dev) } break; case NV_C0: - nvc0_copy_create(dev, 1); + if (!(nv_rd32(dev, 0x022500) & 0x00000200)) + nvc0_copy_create(dev, 1); case NV_D0: - nvc0_copy_create(dev, 0); + if (!(nv_rd32(dev, 0x022500) & 0x00000100)) + nvc0_copy_create(dev, 0); break; default: break; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index f4d4505fe831..2817101fb167 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -258,7 +258,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) radeon_crtc->enabled = true; /* adjust pm to dpms changes BEFORE enabling crtcs */ radeon_pm_compute_clocks(rdev); - /* disable crtc pair power gating before programming */ if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) atombios_powergate_crtc(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_ENABLE); @@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; - /* power gating is per-pair */ - if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) { - struct drm_crtc *other_crtc; - struct radeon_crtc *other_radeon_crtc; - list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { - other_radeon_crtc = to_radeon_crtc(other_crtc); - if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) || - ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) || - ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) || - ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) || - ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) || - ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) { - /* if both crtcs in the pair are off, enable power gating */ - if (other_radeon_crtc->enabled == false) - atombios_powergate_crtc(crtc, ATOM_ENABLE); - break; - } - } - } + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) + atombios_powergate_crtc(crtc, ATOM_ENABLE); /* adjust pm to dpms changes AFTER disabling crtcs */ radeon_pm_compute_clocks(rdev); break; @@ -1682,9 +1664,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = 
crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_atom_ss ss; + int i; atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->mode_info.crtcs[i] && + rdev->mode_info.crtcs[i]->enabled && + i != radeon_crtc->crtc_id && + radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off the pll + */ + goto done; + } + } + switch (radeon_crtc->pll_id) { case ATOM_PPLL1: case ATOM_PPLL2: @@ -1701,6 +1696,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) default: break; } +done: radeon_crtc->pll_id = -1; } diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 7712cf5ab33b..3623b98ed3fe 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -577,30 +577,25 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); + u8 tmp; if (!ASIC_IS_DCE4(rdev)) return panel_mode; - if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == - ENCODER_OBJECT_ID_NUTMEG) - panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; - else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == - ENCODER_OBJECT_ID_TRAVIS) { - u8 id[6]; - int i; - for (i = 0; i < 6; i++) - id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i); - if (id[0] == 0x73 && - id[1] == 0x69 && - id[2] == 0x76 && - id[3] == 0x61 && - id[4] == 0x72 && - id[5] == 0x54) + if (dp_bridge != ENCODER_OBJECT_ID_NONE) { + /* DP bridge chips */ + tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || + (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; else - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { - u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); + /* eDP */ + tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); if (tmp & 1) panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; } diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index f9bc27fe269a..6e8803a1170c 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = NULL; struct radeon_connector_atom_dig *radeon_dig_connector = NULL; @@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - /* some early dce3.2 boards have a bug in their transmitter control table */ - if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || - 
ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - if (ASIC_IS_DCE6(rdev)) { - /* It seems we need to call ATOM_ENCODER_CMD_SETUP again - * before reenabling encoder on DPMS ON, otherwise we never - * get picture - */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + if (!connector) + dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + else + dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); + + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + atombios_dig_encoder_setup(encoder, + ATOM_ENCODER_CMD_SETUP_PANEL_MODE, + dig->panel_mode); + if (ext_encoder) { + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); } atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else { + } else if (ASIC_IS_DCE4(rdev)) { + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + /* enable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + } else { + /* setup and enable the encoder and transmitter */ + atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + /* some early dce3.2 boards have a bug in their transmitter control table */ + if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { @@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + /* disable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - else + } else if (ASIC_IS_DCE4(rdev)) { + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + } else { + /* disable the encoder and transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); + } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); @@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *test_encoder; - struct radeon_encoder_atom_dig *dig; + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; uint32_t dig_enc_in_use = 0; - /* DCE4/5 */ - if (ASIC_IS_DCE4(rdev)) { - dig = radeon_encoder->enc_priv; - if (ASIC_IS_DCE41(rdev)) { + if (ASIC_IS_DCE6(rdev)) { + /* DCE6 */ + 
switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return 1; + else + return 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return 5; + else + return 4; + break; + } + } else if (ASIC_IS_DCE4(rdev)) { + /* DCE4/5 */ + if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) { /* ontario follows DCE4 */ if (rdev->family == CHIP_PALM) { if (dig->linkb) @@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); radeon_encoder->pixel_clock = adjusted_mode->clock; + /* need to call this here rather than in prepare() since we need some crtc info */ + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) atombios_yuv_setup(encoder, true); @@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - - if (!connector) - dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; - else - dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); - - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - atombios_dig_encoder_setup(encoder, - ATOM_ENCODER_CMD_SETUP_PANEL_MODE, - dig->panel_mode); - } else if (ASIC_IS_DCE4(rdev)) { - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - - /* enable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - - /* setup and enable the encoder and transmitter */ - atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } + /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: @@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, break; } - if (ext_encoder) { - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); - else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); - } - atombios_apply_encoder_quirks(encoder, adjusted_mode); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { @@ -2116,7 +2129,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) } radeon_atom_output_lock(encoder, true); - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); if 
(connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -2137,6 +2149,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) static void radeon_atom_encoder_commit(struct drm_encoder *encoder) { + /* need to call this here as we need the crtc set up */ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); radeon_atom_output_lock(encoder, false); } @@ -2177,14 +2190,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE4(rdev)) - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - } + /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index ab74e6b149e7..f37676d7f217 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -63,6 +63,7 @@ struct r600_cs_track { u32 cb_color_size_idx[8]; /* unused */ u32 cb_target_mask; u32 cb_shader_mask; /* unused */ + bool is_resolve; u32 cb_color_size[8]; u32 vgt_strmout_en; u32 vgt_strmout_buffer_en; @@ -315,7 +316,15 @@ static void r600_cs_track_init(struct r600_cs_track *track) track->cb_color_bo[i] = NULL; track->cb_color_bo_offset[i] = 0xFFFFFFFF; track->cb_color_bo_mc[i] = 0xFFFFFFFF; - } + track->cb_color_frag_bo[i] = NULL; + track->cb_color_frag_offset[i] = 0xFFFFFFFF; + track->cb_color_tile_bo[i] = NULL; + track->cb_color_tile_offset[i] = 0xFFFFFFFF; + track->cb_color_mask[i] = 0xFFFFFFFF; + } + track->is_resolve = false; + track->nsamples = 16; + track->log_nsamples = 4; track->cb_target_mask = 0xFFFFFFFF; track->cb_shader_mask = 0xFFFFFFFF; track->cb_dirty = true; @@ -352,6 +361,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) volatile u32 *ib = p->ib.ptr; unsigned array_mode; u32 format; + /* When resolve is used, the second colorbuffer has always 1 sample. */ + unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples; size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; format = G_0280A0_FORMAT(track->cb_color_info[i]); @@ -375,7 +386,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) array_check.group_size = track->group_size; array_check.nbanks = track->nbanks; array_check.npipes = track->npipes; - array_check.nsamples = track->nsamples; + array_check.nsamples = nsamples; array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { @@ -421,7 +432,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) /* check offset */ tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * - r600_fmt_get_blocksize(format) * track->nsamples; + r600_fmt_get_blocksize(format) * nsamples; switch (array_mode) { default: case V_0280A0_ARRAY_LINEAR_GENERAL: @@ -792,6 +803,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) */ if (track->cb_dirty) { tmp = track->cb_target_mask; + + /* We must check both colorbuffers for RESOLVE. 
*/ + if (track->is_resolve) { + tmp |= 0xff; + } + for (i = 0; i < 8; i++) { if ((tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ @@ -1281,6 +1298,11 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) track->nsamples = 1 << tmp; track->cb_dirty = true; break; + case R_028808_CB_COLOR_CONTROL: + tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); + track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; + track->cb_dirty = true; + break; case R_0280A0_CB_COLOR0_INFO: case R_0280A4_CB_COLOR1_INFO: case R_0280A8_CB_COLOR2_INFO: @@ -1416,7 +1438,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case R_028118_CB_COLOR6_MASK: case R_02811C_CB_COLOR7_MASK: tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; - track->cb_color_mask[tmp] = ib[idx]; + track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { track->cb_dirty = true; } diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index bdb69a63062f..fa6f37099ba9 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -66,6 +66,14 @@ #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) +#define R_028808_CB_COLOR_CONTROL 0x28808 +#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4) +#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7) +#define C_028808_SPECIAL_OP 0xFFFFFF8F +#define V_028808_SPECIAL_NORMAL 0x00 +#define V_028808_SPECIAL_DISABLE 0x01 +#define V_028808_SPECIAL_RESOLVE_BOX 0x07 + #define CB_COLOR0_BASE 0x28040 #define CB_COLOR1_BASE 0x28044 #define CB_COLOR2_BASE 0x28048 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index d2e243867ac6..7a3daebd732d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1051,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev, if (rdev->flags & RADEON_IS_AGP) rdev->need_dma32 = true; if ((rdev->flags & RADEON_IS_PCI) && - (rdev->family < CHIP_RS400)) + (rdev->family <= CHIP_RS740)) rdev->need_dma32 = true; dma_bits = rdev->need_dma32 ? 
32 : 40; @@ -1346,12 +1346,15 @@ retry: for (i = 0; i < RADEON_NUM_RINGS; ++i) { radeon_ring_restore(rdev, &rdev->ring[i], ring_sizes[i], ring_data[i]); + ring_sizes[i] = 0; + ring_data[i] = NULL; } r = radeon_ib_ring_tests(rdev); if (r) { dev_err(rdev->dev, "ib ring test failed (%d).\n", r); if (saved) { + saved = false; radeon_suspend(rdev); goto retry; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 27d22d709c90..8c593ea82c41 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -63,9 +63,10 @@ * 2.19.0 - r600-eg: MSAA textures * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query * 2.21.0 - r600-r700: FMASK and CMASK + * 2.22.0 - r600 only: RESOLVE_BOX allowed */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 21 +#define KMS_DRIVER_MINOR 22 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index f93e45d869f4..20bfbda7b3f1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -744,7 +744,6 @@ r600 0x9400 0x00028C38 CB_CLRCMP_DST 0x00028C3C CB_CLRCMP_MSK 0x00028C34 CB_CLRCMP_SRC -0x00028808 CB_COLOR_CONTROL 0x0002842C CB_FOG_BLUE 0x00028428 CB_FOG_GREEN 0x00028424 CB_FOG_RED diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 8bf8a64e5115..8bcd168fffae 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -996,7 +996,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field, struct hid_driver *hdrv = hid->driver; int ret; - hid_dump_input(hid, usage, value); + if (!list_empty(&hid->debug_list)) + hid_dump_input(hid, usage, value); if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { ret = hdrv->event(hid, field, usage, value); @@ -1558,7 +1559,9 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, - { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, +#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD) + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, +#endif { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 0f9c146fc00d..4d524b5f52f5 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -439,7 +439,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) struct dj_report *dj_report; int retval; - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; @@ -456,7 +456,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report; int retval; - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; diff --git 
a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 903eef3d3e10..991e85c7325c 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -70,6 +70,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 351d1f4593e7..4ee578948723 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") } + }, { + /* Old interface reads the same sensor for fan0 and fan1 */ + .ident = "Asus M5A78L", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "M5A78L") + } }, { } }; diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c index ff4c0a87a25f..ce68e361558c 100644 --- a/drivers/input/keyboard/imx_keypad.c +++ b/drivers/input/keyboard/imx_keypad.c @@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad) /* Inhibit KDI and KRI interrupts. */ reg_val = readw(keypad->mmio_base + KPSR); reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; writew(reg_val, keypad->mmio_base + KPSR); /* Colums as open drain and disable all rows */ @@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev) input_set_drvdata(input_dev, keypad); /* Ensure that the keypad will stay dormant until opened */ + clk_enable(keypad->clk); imx_keypad_inhibit(keypad); + clk_disable(keypad->clk); error = request_irq(irq, imx_keypad_irq_handler, 0, pdev->name, keypad); diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 5ec774d6c82b..6918773ce024 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -177,6 +177,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { }, }, { + /* Gigabyte T1005 - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), + }, + }, + { + /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), + }, + }, + { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 002041975de9..532d067a9e07 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A = { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xF4 = - { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, + { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, + 63, 
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0xF8 = + { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x3F = { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, @@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xEF) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, + { USB_DEVICE_WACOM(0xF8) }, { USB_DEVICE_WACOM(0xFA) }, { USB_DEVICE_LENOVO(0x6004) }, { } diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 9afc777a40a7..b06a5e3a665e 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -602,6 +602,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) { if (tsdata->debug_dir) debugfs_remove_recursive(tsdata->debug_dir); + kfree(tsdata->raw_buffer); } #else @@ -843,7 +844,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client) if (gpio_is_valid(pdata->reset_pin)) gpio_free(pdata->reset_pin); - kfree(tsdata->raw_buffer); kfree(tsdata); return 0; diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index f1c84decb192..172a768036d8 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); - if (req->cmd_flags & REQ_SECURE) + if (req->cmd_flags & REQ_SECURE && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); @@ -1716,6 +1717,7 @@ force_ro_fail: #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { @@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] = MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + /* + * On these Samsung MoviNAND parts, performing secure erase or + * secure trim can result in unrecoverable corruption due to a + * firmware bug. 
+ */ + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + END_FIXUP }; diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 322412cec4ee..a53c7c478e05 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -81,6 +81,7 @@ struct atmel_mci_caps { bool has_bad_data_ordering; bool need_reset_after_xfer; bool need_blksz_mul_4; + bool need_notbusy_for_read_ops; }; struct atmel_mci_dma { @@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv) __func__); atmci_set_completed(host, EVENT_XFER_COMPLETE); - if (host->data->flags & MMC_DATA_WRITE) { + if (host->caps.need_notbusy_for_read_ops || + (host->data->flags & MMC_DATA_WRITE)) { atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); state = STATE_WAITING_NOTBUSY; } else if (host->mrq->stop) { @@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) host->caps.has_bad_data_ordering = 1; host->caps.need_reset_after_xfer = 1; host->caps.need_blksz_mul_4 = 1; + host->caps.need_notbusy_for_read_ops = 0; /* keep only major version number */ switch (version & 0xf00) { @@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) case 0x200: host->caps.has_rwproof = 1; host->caps.need_blksz_mul_4 = 0; + host->caps.need_notbusy_for_read_ops = 1; case 0x100: host->caps.has_bad_data_ordering = 0; host->caps.need_reset_after_xfer = 0; diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index 03666174ca48..a17dd7363ceb 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c @@ -49,13 +49,6 @@ #define bfin_write_SDH_CFG bfin_write_RSI_CFG #endif -struct dma_desc_array { - unsigned long start_addr; - unsigned short cfg; - unsigned short x_count; - short x_modify; -} __packed; - struct sdh_host { struct mmc_host *mmc; spinlock_t lock; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 72dc3cde646d..af40d227bece 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) { struct dw_mci *host = slot->host; u32 div; + u32 clk_en_a; if (slot->clock != host->current_speed) { div = host->bus_hz / slot->clock; @@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); - /* enable clock */ - mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | - SDMMC_CLKEN_LOW_PWR) << slot->id)); + /* enable clock; only low power if no SDIO */ + clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; + if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) + clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; + mci_writel(host, CLKENA, clk_en_a); /* inform CIU */ mci_send_cmd(slot, @@ 
-862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc) return present; } +/* + * Disable lower power mode. + * + * Low power mode will stop the card clock when idle. According to the + * description of the CLKENA register we should disable low power mode + * for SDIO cards if we need SDIO interrupts to work. + * + * This function is fast if low power mode is already disabled. + */ +static void dw_mci_disable_low_power(struct dw_mci_slot *slot) +{ + struct dw_mci *host = slot->host; + u32 clk_en_a; + const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; + + clk_en_a = mci_readl(host, CLKENA); + + if (clk_en_a & clken_low_pwr) { + mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | + SDMMC_CMD_PRV_DAT_WAIT, 0); + } +} + static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) { struct dw_mci_slot *slot = mmc_priv(mmc); @@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) /* Enable/disable Slot Specific SDIO interrupt */ int_mask = mci_readl(host, INTMASK); if (enb) { + /* + * Turn off low power mode if it was enabled. This is a bit of + * a heavy operation and we disable / enable IRQs a lot, so + * we'll leave low power mode disabled and it will get + * re-enabled again in dw_mci_setup_bus(). + */ + dw_mci_disable_low_power(slot); + mci_writel(host, INTMASK, (int_mask | SDMMC_INT_SDIO(slot->id))); } else { @@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host) nbytes += len; remain -= len; } while (remain); - sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_RXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ data->bytes_xfered += nbytes; @@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host) nbytes += len; remain -= len; } while (remain); - sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_TXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ data->bytes_xfered += nbytes; @@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) { struct dw_mci *host = dev_id; - u32 status, pending; + u32 pending; unsigned int pass_count = 0; int i; do { - status = mci_readl(host, RINTSTS); pending = mci_readl(host, MINTSTS); /* read-only mask reg */ /* @@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & DW_MCI_CMD_ERROR_FLAGS) { mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); - host->cmd_status = status; + host->cmd_status = pending; smp_wmb(); set_bit(EVENT_CMD_COMPLETE, &host->pending_events); } @@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & DW_MCI_DATA_ERROR_FLAGS) { /* if there is an error report DATA_ERROR */ mci_writel(host, RINTSTS, 
DW_MCI_DATA_ERROR_FLAGS); - host->data_status = status; + host->data_status = pending; smp_wmb(); set_bit(EVENT_DATA_ERROR, &host->pending_events); - if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | - SDMMC_INT_SBE | SDMMC_INT_EBE))) - tasklet_schedule(&host->tasklet); + tasklet_schedule(&host->tasklet); } if (pending & SDMMC_INT_DATA_OVER) { mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); if (!host->data_status) - host->data_status = status; + host->data_status = pending; smp_wmb(); if (host->dir_status == DW_MCI_RECV_STATUS) { if (host->sg != NULL) @@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & SDMMC_INT_CMD_DONE) { mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); - dw_mci_cmd_interrupt(host, status); + dw_mci_cmd_interrupt(host, pending); } if (pending & SDMMC_INT_CD) { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index a51f9309ffbb..ad3fcea1269e 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) writel(stat & MXS_MMC_IRQ_BITS, host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); + spin_unlock(&host->lock); + if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) mmc_signal_sdio_irq(host->mmc); - spin_unlock(&host->lock); - if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) cmd->error = -ETIMEDOUT; else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) @@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); writel(BM_SSP_CTRL1_SDIO_IRQ_EN, host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); - - if (readl(host->base + HW_SSP_STATUS(host)) & - BM_SSP_STATUS_SDIO_IRQ) - mmc_signal_sdio_irq(host->mmc); - } else { writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); @@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) } spin_unlock_irqrestore(&host->lock, flags); + + if (enable && readl(host->base + HW_SSP_STATUS(host)) & + BM_SSP_STATUS_SDIO_IRQ) + mmc_signal_sdio_irq(host->mmc); + } static const struct mmc_host_ops mxs_mmc_ops = { diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 50e08f03aa65..a5999a74496a 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -668,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data) static void mmc_omap_xfer_data(struct mmc_omap_host *host, int write) { - int n; + int n, nwords; if (host->buffer_bytes_left == 0) { host->sg_idx++; @@ -678,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) n = 64; if (n > host->buffer_bytes_left) n = host->buffer_bytes_left; + + nwords = n / 2; + nwords += n & 1; /* handle odd number of bytes to transfer */ + host->buffer_bytes_left -= n; host->total_bytes_left -= n; host->data->bytes_xfered += n; if (write) { - __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } else { - __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } + + host->buffer += nwords; } static inline void mmc_omap_report_irq(u16 status) diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index b97b2f5dafdb..d25f9ab9a54d 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -48,14 +48,14 @@ static inline 
void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) int div = 1; u32 temp; + if (clock == 0) + goto out; + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - if (clock == 0) - goto out; - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) pre_div *= 2; diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 437bc193e170..568307cc7caf 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -340,7 +340,7 @@ retry: * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. */ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); - kfree(new_aeb); + kmem_cache_free(ai->aeb_slab_cache, new_aeb); ubi_free_vid_hdr(ubi, vid_hdr); return err; @@ -353,7 +353,7 @@ write_error: list_add(&new_aeb->u.list, &ai->erase); goto retry; } - kfree(new_aeb); + kmem_cache_free(ai->aeb_slab_cache, new_aeb); out_free: ubi_free_vid_hdr(ubi, vid_hdr); return err; diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 4f50145f6483..662c5f7eb0c5 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev) priv = netdev_priv(dev); dev->irq = res_irq->start; - priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); + priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; + if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE) + priv->irq_flags |= IRQF_SHARED; priv->reg_base = addr; /* The CAN clock frequency is half the oscillator clock frequency */ priv->can.clock.freq = pdata->osc_freq / 2; diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index 310596175676..b595d3422b9f 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card, const uint8_t *mem, *end, *dat; uint16_t type, len; uint32_t addr; - uint8_t *buf = NULL; + uint8_t *buf = NULL, *new_buf; int buflen = 0; int8_t type_end = 0; @@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card, if (len > buflen) { /* align buflen */ buflen = (len + (1024-1)) & ~(1024-1); - buf = krealloc(buf, buflen, GFP_KERNEL); - if (!buf) { + new_buf = krealloc(buf, buflen, GFP_KERNEL); + if (!new_buf) { ret = -ENOMEM; goto failed; } + buf = new_buf; } /* verify record data */ memcpy_fromio(buf, &dpram[addr + offset], len); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 463b9ec57d80..6d1a24acb77e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params { continue; \ else -#define for_each_napi_rx_queue(bp, var) \ - for ((var) = 0; (var) < bp->num_napi_queues; (var)++) - /* Skip OOO FP */ #define for_each_tx_queue(bp, var) \ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e879e19eb0d6..af20c6ee2cd9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) */ bnx2x_setup_tc(bp->dev, bp->max_cos); + /* Add all NAPI objects */ 
+ bnx2x_add_all_napi(bp); bnx2x_napi_enable(bp); /* set pf load just before approaching the MCP */ @@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); /* Release IRQs */ bnx2x_free_irq(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index dfa757e74296..21b553229ea4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) bp->num_napi_queues = bp->num_queues; /* Add NAPI objects */ - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, BNX2X_NAPI_WEIGHT); } @@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_del(&bnx2x_fp(bp, i, napi)); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index fc4e0e3885b0..c37a68d68090 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev, */ static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) { - bnx2x_del_all_napi(bp); bnx2x_disable_msi(bp); BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; bnx2x_set_int_mode(bp); - bnx2x_add_all_napi(bp); } /** diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 02b5a343b195..21054987257a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -8427,6 +8427,8 @@ unload_error: /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); /* Release IRQs */ bnx2x_free_irq(bp); @@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void poll_bnx2x(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + int i; - disable_irq(bp->pdev->irq); - bnx2x_interrupt(bp->pdev->irq, dev); - enable_irq(bp->pdev->irq); + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + } } #endif @@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, */ bnx2x_set_int_mode(bp); - /* Add all NAPI objects */ - bnx2x_add_all_napi(bp); - rc = register_netdev(dev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); @@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) unregister_netdev(dev); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - /* Power on: we can't let PCI layer write to us while we are in D3 */ bnx2x_set_power_state(bp, PCI_D0); @@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) bnx2x_tx_disable(bp); bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); del_timer_sync(&bp->timer); diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 845b2020f291..138446957786 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev) { 
struct net_local *lp = netdev_priv(dev); unsigned long flags; + u16 cfg; spin_lock_irqsave(&lp->lock, flags); if (dev->flags & IFF_PROMISC) @@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev) /* in promiscuous mode, we accept errored packets, * so we have to enable interrupts on them also */ - writereg(dev, PP_RxCFG, - (lp->curr_rx_cfg | - (lp->rx_mode == RX_ALL_ACCEPT) - ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) - : 0)); + cfg = lp->curr_rx_cfg; + if (lp->rx_mode == RX_ALL_ACCEPT) + cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL; + writereg(dev, PP_RxCFG, cfg); spin_unlock_irqrestore(&lp->lock, flags); } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 7fac97b4bb59..8c63d06ab12b 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter) int num = 0, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - spin_lock_bh(&adapter->mcc_cq_lock); + spin_lock(&adapter->mcc_cq_lock); while ((compl = be_mcc_compl_get(adapter))) { if (compl->flags & CQE_FLAGS_ASYNC_MASK) { /* Interpret flags as an async trailer */ @@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter) if (num) be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); - spin_unlock_bh(&adapter->mcc_cq_lock); + spin_unlock(&adapter->mcc_cq_lock); return status; } @@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (be_error(adapter)) return -EIO; + local_bh_disable(); status = be_process_mcc(adapter); + local_bh_enable(); if (atomic_read(&mcc_obj->q.used) == 0) break; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 90a903d83d87..78b8aa8069f0 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work) /* when interrupts are not yet enabled, just reap any pending * mcc completions */ if (!netif_running(adapter->netdev)) { + local_bh_disable(); be_process_mcc(adapter); + local_bh_enable(); goto reschedule; } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4605f7246687..d3233f59a82e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev) if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_RX; } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index cd153326c3cf..cb3356c9af80 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -310,6 +310,7 @@ struct e1000_adapter { */ struct e1000_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; + u32 tx_fifo_limit; struct napi_struct napi; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 46c3b1f9ff89..d01a099475a1 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3517,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter 
*adapter) } /* + * Alignment of Tx data is on an arbitrary byte boundary with the + * maximum size per Tx descriptor limited only to the transmit + * allocation of the packet buffer minus 96 bytes with an upper + * limit of 24KB due to receive synchronization limitations. + */ + adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, + 24 << 10); + + /* * Disable Adaptive Interrupt Moderation if 2 full packets cannot * fit in receive buffer. */ @@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) return 1; } -#define E1000_MAX_PER_TXD 8192 -#define E1000_MAX_TXD_PWR 12 - static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, - unsigned int nr_frags, unsigned int mss) + unsigned int nr_frags) { struct e1000_adapter *adapter = tx_ring->adapter; struct pci_dev *pdev = adapter->pdev; @@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) { + BUG_ON(size > tx_ring->count); + if (e1000_desc_unused(tx_ring) >= size) return 0; return __e1000_maybe_stop_tx(tx_ring, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int first; - unsigned int max_per_txd = E1000_MAX_PER_TXD; - unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; unsigned int len = skb_headlen(skb); unsigned int nr_frags; @@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, } mss = skb_shinfo(skb)->gso_size; - /* - * The controller does a simple calculation to - * make sure there is enough room in the FIFO before - * initiating the DMA for each buffer. The calc is: - * 4 = ceil(buffer len/mss). To make sure we don't - * overrun the FIFO, adjust the max buffer len if mss - * drops. - */ if (mss) { u8 hdr_len; - max_per_txd = min(mss << 2, max_per_txd); - max_txd_pwr = fls(max_per_txd) - 1; /* * TSO Workaround for 82571/2/3 Controllers -- if skb->data @@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, count++; count++; - count += TXD_USE_COUNT(len, max_txd_pwr); + count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) - count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), - max_txd_pwr); + count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), + adapter->tx_fifo_limit); if (adapter->hw.mac.tx_pkt_filtering) e1000_transfer_dhcp_info(adapter, skb); @@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tx_flags |= E1000_TX_FLAGS_NO_FCS; /* if count is 0 then mapping error has occurred */ - count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); + count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, + nr_frags); if (count) { skb_tx_timestamp(skb); netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. 
*/ - e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); - + e1000_maybe_stop_tx(tx_ring, + (MAX_SKB_FRAGS * + DIV_ROUND_UP(PAGE_SIZE, + adapter->tx_fifo_limit) + 2)); } else { dev_kfree_skb_any(skb); tx_ring->buffer_info[first].time_stamp = 0; @@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->hw.phy.autoneg_advertised = 0x2f; /* ring size defaults */ - adapter->rx_ring->count = 256; - adapter->tx_ring->count = 256; + adapter->rx_ring->count = E1000_DEFAULT_RXD; + adapter->tx_ring->count = E1000_DEFAULT_TXD; /* * Initial Wake on LAN setting - If APM wake is enabled in diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 8cba2df82b18..5faedd855b77 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, &ip_entry->ip4dst, &ip_entry->pdst); if (rc != 0) { rc = efx_filter_get_ipv4_full( - &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, - &ip_entry->ip4dst, &ip_entry->pdst); + &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst, + &ip_entry->ip4src, &ip_entry->psrc); EFX_WARN_ON_PARANOID(rc); ip_mask->ip4src = ~0; ip_mask->psrc = ~0; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index e2d083228f3a..719be3912aa9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __COMMON_H__ +#define __COMMON_H__ + #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/phy.h> @@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_ring_mode_ops ring_mode_ops; + +#endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 9820ec842cc0..223adf95fd03 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -20,6 +20,10 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ + +#ifndef __DESCS_H__ +#define __DESCS_H__ + struct dma_desc { /* Receive descriptor */ union { @@ -166,3 +170,5 @@ enum tdes_csum_insertion { * is not calculated */ cic_full = 3, /* IP header and pseudoheader */ }; + +#endif /* __DESCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index dd8d6e19dff6..7ee9499a6e38 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -27,6 +27,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __DESC_COM_H__ +#define __DESC_COM_H__ + #if defined(CONFIG_STMMAC_RING) static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) { @@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) p->des01.tx.buffer1_size = len; } #endif + +#endif /* __DESC_COM_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h index 7c6d857a9cc7..2ec6aeae349e 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __DWMAC100_H__ +#define __DWMAC100_H__ + #include <linux/phy.h> #include "common.h" @@ -119,3 +122,5 @@ enum ttc_control { #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ extern const struct stmmac_dma_ops dwmac100_dma_ops; + +#endif /* __DWMAC100_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index f90fcb5f9573..0e4cacedc1f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -19,6 +19,8 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __DWMAC1000_H__ +#define __DWMAC1000_H__ #include <linux/phy.h> #include "common.h" @@ -229,6 +231,7 @@ enum rtc_control { #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 /* Synopsys Core versions */ -#define DWMAC_CORE_3_40 34 +#define DWMAC_CORE_3_40 0x34 extern const struct stmmac_dma_ops dwmac1000_dma_ops; +#endif /* __DWMAC1000_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index e678ce39d014..e49c9a0fd6ff 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __DWMAC_DMA_H__ +#define __DWMAC_DMA_H__ + /* DMA CRS Control and Status Register Mapping */ #define DMA_BUS_MODE 0x00001000 /* Bus Mode */ #define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ @@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr); extern void dwmac_dma_stop_rx(void __iomem *ioaddr); extern int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); + +#endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index a38352024cb8..67995ef25251 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __MMC_H__ +#define __MMC_H__ + /* MMC control register */ /* When set, all counter are reset */ #define MMC_CNTRL_COUNTER_RESET 0x1 @@ -129,3 +132,5 @@ struct stmmac_counters { extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); + +#endif /* __MMC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index c07cfe989f6e..0c74a702d461 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -33,7 +33,7 @@ #define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ #define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ #define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ -#define MMC_DEFAUL_MASK 0xffffffff +#define MMC_DEFAULT_MASK 0xffffffff /* MMC TX counter registers */ @@ -147,8 +147,8 @@ void 
dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) /* To mask all all interrupts.*/ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) { - writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); - writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); } /* This reads the MAC core counters (if actaully supported). diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f2d3665430ad..e872e1da3137 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -20,6 +20,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __STMMAC_H__ +#define __STMMAC_H__ + #define STMMAC_RESOURCE_NAME "stmmaceth" #define DRV_MODULE_VERSION "March_2012" @@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void) { } #endif /* CONFIG_STMMAC_PCI */ + +#endif /* __STMMAC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h index 6863590d184b..aea9b14cdfbe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h @@ -21,6 +21,8 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __STMMAC_TIMER_H__ +#define __STMMAC_TIMER_H__ struct stmmac_timer { void (*timer_start) (unsigned int new_freq); @@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev); extern int tmu2_register_user(void *fnt, void *data); extern void tmu2_unregister_user(void); #endif + +#endif /* __STMMAC_TIMER_H__ */ diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index cd7ee204e94a..a9ca4a03d31b 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct davinci_mdio_data *data = dev_get_drvdata(dev); - if (data->bus) + if (data->bus) { + mdiobus_unregister(data->bus); mdiobus_free(data->bus); + } if (data->clk) clk_put(data->clk); diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c index 24d8566cfd8b..441b4dc79450 100644 --- a/drivers/net/fddi/skfp/pmf.c +++ b/drivers/net/fddi/skfp/pmf.c @@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, sm_pm_get_ls(smc,port_to_mib(smc,port))) ; break ; case SMT_P_REASON : - * (u_long *) to = 0 ; + *(u32 *)to = 0 ; sp_len = 4 ; goto sp_done ; case SMT_P1033 : /* time stamp */ diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 328397c66730..adfab3fc5478 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -413,7 +413,9 @@ static const struct usb_device_id products[] = { /* 5. 
Gobi 2000 and 3000 devices */ {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ + {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ + {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */ {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ @@ -441,6 +443,8 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ + {QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */ + {QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */ { } /* END */ }; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 8531c1caac28..fd4b26d46fd5 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf) netif_device_present(dev->net) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_HALT, &dev->flags)) - rx_alloc_submit(dev, GFP_KERNEL); + rx_alloc_submit(dev, GFP_NOIO); if (!(dev->txq.qlen >= TX_QLEN(dev))) netif_tx_wake_all_queues(dev->net); diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c index 4026c906cc7b..b7e0258887e7 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/drivers/net/wireless/ath/ath5k/eeprom.c @@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) case AR5K_EEPROM_MODE_11A: offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); rate_pcal_info = ee->ee_rate_tpwr_a; - ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; + ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN; break; case AR5K_EEPROM_MODE_11B: offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h index dc2bcfeadeb4..94a9bbea6874 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.h +++ b/drivers/net/wireless/ath/ath5k/eeprom.h @@ -182,6 +182,7 @@ #define AR5K_EEPROM_EEP_DELTA 10 #define AR5K_EEPROM_N_MODES 3 #define AR5K_EEPROM_N_5GHZ_CHAN 10 +#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8 #define AR5K_EEPROM_N_2GHZ_CHAN 3 #define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 #define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 192ad5c1fcc8..a5edebeb0b4f 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl) /* dpc will not be rescheduled */ wl->resched = false; + /* inform publicly that interface is down */ + wl->pub->up = false; + return 0; } diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 95aa8e1683ec..83324b321652 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) return; } len = ETH_ALEN; - ipw2100_get_ordinal(priv, 
IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len); + ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid, + &len); if (ret) { IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c index 46782f1102ac..a47b306b522c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, const struct fw_img *img; size_t bufsz; + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + /* default is to dump the entire data segment */ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { priv->dbgfs_sram_offset = 0x800000; diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index d9694c58208c..4ffc18dc3a57 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q); /***************************************************** * Error handling ******************************************************/ -int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); +int iwl_dump_fh(struct iwl_trans *trans, char **buf); void iwl_dump_csr(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 39a6ca1f009c..d1a61ba6247a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans) } iwl_dump_csr(trans); - iwl_dump_fh(trans, NULL, false); + iwl_dump_fh(trans, NULL); iwl_op_mode_nic_error(trans->op_mode); } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 939c2f78df58..1e86ea2266d4 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd) #undef IWL_CMD } -int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) +int iwl_dump_fh(struct iwl_trans *trans, char **buf) { int i; -#ifdef CONFIG_IWLWIFI_DEBUG - int pos = 0; - size_t bufsz = 0; -#endif static const u32 fh_tbl[] = { FH_RSCSR_CHNL0_STTS_WPTR_REG, FH_RSCSR_CHNL0_RBDCB_BASE_REG, @@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG }; -#ifdef CONFIG_IWLWIFI_DEBUG - if (display) { - bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (buf) { + int pos = 0; + size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + *buf = kmalloc(bufsz, GFP_KERNEL); if (!*buf) return -ENOMEM; + pos += scnprintf(*buf + pos, bufsz - pos, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) pos += scnprintf(*buf + pos, bufsz - pos, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); - } + return pos; } #endif + IWL_ERR(trans, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) IWL_ERR(trans, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); - } + return 0; } @@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, size_t count, loff_t *ppos) { struct iwl_trans *trans = 
file->private_data; - char *buf; + char *buf = NULL; int pos = 0; ssize_t ret = -EFAULT; - ret = pos = iwl_dump_fh(trans, &buf, true); + ret = pos = iwl_dump_fh(trans, &buf); if (buf) { ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 30899901aef5..650f79a1f2bd 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -57,8 +57,7 @@ static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { - struct page *page; - unsigned offset; + int pull_to; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) @@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev, struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { - struct page *page = NETFRONT_SKB_CB(skb)->page; - void *vaddr = page_address(page); - unsigned offset = NETFRONT_SKB_CB(skb)->offset; - - memcpy(skb->data, vaddr + offset, - skb_headlen(skb)); + int pull_to = NETFRONT_SKB_CB(skb)->pull_to; - if (page != skb_frag_page(&skb_shinfo(skb)->frags[0])) - __free_page(page); + __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. */ skb->protocol = eth_type_trans(skb, dev); @@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget) struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; - unsigned int len; int err; spin_lock(&np->rx_lock); @@ -955,24 +947,13 @@ err: } } - NETFRONT_SKB_CB(skb)->page = - skb_frag_page(&skb_shinfo(skb)->frags[0]); - NETFRONT_SKB_CB(skb)->offset = rx->offset; - - len = rx->status; - if (len > RX_COPY_THRESHOLD) - len = RX_COPY_THRESHOLD; - skb_put(skb, len); + NETFRONT_SKB_CB(skb)->pull_to = rx->status; + if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) + NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; - if (rx->status > len) { - skb_shinfo(skb)->frags[0].page_offset = - rx->offset + len; - skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len); - skb->data_len = rx->status - len; - } else { - __skb_fill_page_desc(skb, 0, NULL, 0, 0); - skb_shinfo(skb)->nr_frags = 0; - } + skb_shinfo(skb)->frags[0].page_offset = rx->offset; + skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); + skb->data_len = rx->status; i = xennet_fill_frags(np, skb, &tmpq); @@ -999,7 +980,7 @@ err: * receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ - skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); + skb->truesize += skb->data_len - RX_COPY_THRESHOLD; skb->len += skb->data_len; if (rx->flags & XEN_NETRXF_csum_blank) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 5270f1a99328..d6fd6b6d9d4b 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi) { struct drv_dev_and_id *ddi = _ddi; struct device *dev = &ddi->dev->dev; + struct device *parent = dev->parent; int rc; + /* The parent bridge must be in active state when probing */ + if (parent) + pm_runtime_get_sync(parent); /* Unbound PCI devices are always set to disabled and suspended. * During probe, the device is set to enabled and active and the * usage count is incremented. 
If the driver supports runtime PM, @@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi) pm_runtime_set_suspended(dev); pm_runtime_put_noidle(dev); } + if (parent) + pm_runtime_put(parent); return rc; } diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 6869009c7393..02d107b15281 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) } struct device_attribute vga_attr = __ATTR_RO(boot_vga); +static void +pci_config_pm_runtime_get(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + if (parent) + pm_runtime_get_sync(parent); + pm_runtime_get_noresume(dev); + /* + * pdev->current_state is set to PCI_D3cold during suspending, + * so wait until suspending completes + */ + pm_runtime_barrier(dev); + /* + * Only need to resume devices in D3cold, because config + * registers are still accessible for devices suspended but + * not in D3cold. + */ + if (pdev->current_state == PCI_D3cold) + pm_runtime_resume(dev); +} + +static void +pci_config_pm_runtime_put(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + pm_runtime_put(dev); + if (parent) + pm_runtime_put_sync(parent); +} + static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, size = count; } + pci_config_pm_runtime_get(dev); + if ((off & 1) && size) { u8 val; pci_user_read_config_byte(dev, off, &val); @@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, --size; } + pci_config_pm_runtime_put(dev); + return count; } @@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, count = size; } + pci_config_pm_runtime_get(dev); + if ((off & 1) && size) { pci_user_write_config_byte(dev, off, data[off - init_off]); off++; @@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, --size; } + pci_config_pm_runtime_put(dev); + return count; } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index f3ea977a5b1b..ab4bf5a4c2f1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1941,6 +1941,7 @@ void pci_pm_init(struct pci_dev *dev) dev->pm_cap = pm; dev->d3_delay = PCI_PM_D3_WAIT; dev->d3cold_delay = PCI_PM_D3COLD_WAIT; + dev->d3cold_allowed = true; dev->d1_support = false; dev->d2_support = false; diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 3a7eefcb270a..e76b44777dbf 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -140,9 +140,17 @@ static int pcie_port_runtime_resume(struct device *dev) { return 0; } + +static int pcie_port_runtime_idle(struct device *dev) +{ + /* Delay for a short while to prevent too frequent suspend/resume */ + pm_schedule_suspend(dev, 10); + return -EBUSY; +} #else #define pcie_port_runtime_suspend NULL #define pcie_port_runtime_resume NULL +#define pcie_port_runtime_idle NULL #endif static const struct dev_pm_ops pcie_portdrv_pm_ops = { @@ -155,6 +163,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { .resume_noirq = pcie_port_resume_noirq, .runtime_suspend = pcie_port_runtime_suspend, .runtime_resume = pcie_port_runtime_resume, + .runtime_idle = pcie_port_runtime_idle, }; #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) @@ -200,6 +209,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, return status; 
pci_save_state(dev); + /* + * D3cold may not work properly on some PCIe port, so disable + * it by default. + */ + dev->d3cold_allowed = false; if (!pci_match_id(port_runtime_pm_black_list, dev)) pm_runtime_put_noidle(&dev->dev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 6c143b4497ca..9f8a6b79a8ec 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) case PCI_BASE_ADDRESS_MEM_TYPE_32: break; case PCI_BASE_ADDRESS_MEM_TYPE_1M: - dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n"); + /* 1M mem BAR treated as 32-bit BAR */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64: flags |= IORESOURCE_MEM_64; break; default: - dev_warn(&dev->dev, - "mem unknown type %x treated as 32-bit BAR\n", - mem_type); + /* mem unknown type treated as 32-bit BAR */ break; } return flags; @@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, u32 l, sz, mask; u16 orig_cmd; struct pci_bus_region region; + bool bar_too_big = false, bar_disabled = false; mask = type ? PCI_ROM_ADDRESS_MASK : ~0; + /* No printks while decoding is disabled! */ if (!dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); pci_write_config_word(dev, PCI_COMMAND, @@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, goto fail; if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { - dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", - pos); + bar_too_big = true; goto fail; } @@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, region.start = 0; region.end = sz64; pcibios_bus_to_resource(dev, res, &region); + bar_disabled = true; } else { region.start = l64; region.end = l64 + sz64; pcibios_bus_to_resource(dev, res, &region); - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", - pos, res); } } else { sz = pci_size(l, sz, mask); @@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, region.start = l; region.end = l + sz; pcibios_bus_to_resource(dev, res, &region); - - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); } - out: + goto out; + + +fail: + res->flags = 0; +out: if (!dev->mmio_always_on) pci_write_config_word(dev, PCI_COMMAND, orig_cmd); + if (bar_too_big) + dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos); + if (res->flags && !bar_disabled) + dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); + return (res->flags & IORESOURCE_MEM_64) ? 
1 : 0; - fail: - res->flags = 0; - goto out; } static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index 831868904e02..1dd61f402b04 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c @@ -58,6 +58,7 @@ struct sam9_rtc { struct rtc_device *rtcdev; u32 imr; void __iomem *gpbr; + int irq; }; #define rtt_readl(rtc, field) \ @@ -292,7 +293,7 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) { struct resource *r, *r_gpbr; struct sam9_rtc *rtc; - int ret; + int ret, irq; u32 mr; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -302,10 +303,18 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) return -ENODEV; } + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get interrupt resource\n"); + return irq; + } + rtc = kzalloc(sizeof *rtc, GFP_KERNEL); if (!rtc) return -ENOMEM; + rtc->irq = irq; + /* platform setup code should have handled this; sigh */ if (!device_can_wakeup(&pdev->dev)) device_init_wakeup(&pdev->dev, 1); @@ -345,11 +354,10 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) } /* register irq handler after we know what name we'll use */ - ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt, - IRQF_SHARED, + ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); if (ret) { - dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS); + dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); rtc_device_unregister(rtc->rtcdev); goto fail_register; } @@ -386,7 +394,7 @@ static int __devexit at91_rtc_remove(struct platform_device *pdev) /* disable all interrupts */ rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); - free_irq(AT91_ID_SYS, rtc); + free_irq(rtc->irq, rtc); rtc_device_unregister(rtc->rtcdev); @@ -423,7 +431,7 @@ static int at91_rtc_suspend(struct platform_device *pdev, rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); if (rtc->imr) { if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) { - enable_irq_wake(AT91_ID_SYS); + enable_irq_wake(rtc->irq); /* don't let RTTINC cause wakeups */ if (mr & AT91_RTT_RTTINCIEN) rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); @@ -441,7 +449,7 @@ static int at91_rtc_resume(struct platform_device *pdev) if (rtc->imr) { if (device_may_wakeup(&pdev->dev)) - disable_irq_wake(AT91_ID_SYS); + disable_irq_wake(rtc->irq); mr = rtt_readl(rtc, MR); rtt_writel(rtc, MR, mr | rtc->imr); } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 40a826a7295f..2fb2b9ea97ec 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -3804,7 +3804,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) case BIODASDSYMMIO: return dasd_symm_io(device, argp); default: - return -ENOIOCTLCMD; + return -ENOTTY; } } diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index cceae70279f6..654c6921a6d4 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -498,12 +498,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode, break; default: /* if the discipline has an ioctl method try it. 
*/ - if (base->discipline->ioctl) { + rc = -ENOTTY; + if (base->discipline->ioctl) rc = base->discipline->ioctl(block, cmd, argp); - if (rc == -ENOIOCTLCMD) - rc = -EINVAL; - } else - rc = -EINVAL; } dasd_put_device(base); return rc; diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c index 77da6a2f43dc..c03ecdd31e4c 100644 --- a/drivers/video/auo_k190x.c +++ b/drivers/video/auo_k190x.c @@ -987,7 +987,6 @@ err_regfb: fb_dealloc_cmap(&info->cmap); err_cmap: fb_deferred_io_cleanup(info); - kfree(info->fbdefio); err_defio: vfree((void *)info->screen_base); err_irq: @@ -1022,7 +1021,6 @@ int __devexit auok190x_common_remove(struct platform_device *pdev) fb_dealloc_cmap(&info->cmap); fb_deferred_io_cleanup(info); - kfree(info->fbdefio); vfree((void *)info->screen_base); diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c index 28b1a834906b..61b182bf32a2 100644 --- a/drivers/video/console/bitblit.c +++ b/drivers/video/console/bitblit.c @@ -162,7 +162,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, image.depth = 1; if (attribute) { - buf = kmalloc(cellsize, GFP_KERNEL); + buf = kmalloc(cellsize, GFP_ATOMIC); if (!buf) return; } diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 88e92041d8f0..fdefa8fd72c4 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -449,7 +449,7 @@ static int __init fb_console_setup(char *this_opt) while ((options = strsep(&this_opt, ",")) != NULL) { if (!strncmp(options, "font:", 5)) - strcpy(fontname, options + 5); + strlcpy(fontname, options + 5, sizeof(fontname)); if (!strncmp(options, "scrollback:", 11)) { options += 11; diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c index 00ce1f34b496..57d940be5f3d 100644 --- a/drivers/video/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/mb862xx/mb862xxfbdrv.c @@ -328,6 +328,8 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd, case MB862XX_L1_SET_CFG: if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg))) return -EFAULT; + if (l1_cfg->dh == 0 || l1_cfg->dw == 0) + return -EINVAL; if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { /* downscaling */ outreg(cap, GC_CAP_CSC, diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index 5d31699fbd3c..f43bfe17b3b6 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c @@ -105,6 +105,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) sdi_config_lcd_manager(dssdev); + /* + * LCLK and PCLK divisors are located in shadow registers, and we + * normally write them to DISPC registers when enabling the output. + * However, SDI uses pck-free as source clock for its PLL, and pck-free + * is affected by the divisors. And as we need the PLL before enabling + * the output, we need to write the divisors early. + * + * It seems just writing to the DISPC register is enough, and we don't + * need to care about the shadow register mechanism for pck-free. The + * exact reason for this is unknown. 
+ */ + dispc_mgr_set_clock_div(dssdev->manager->id, + &sdi.mgr_config.clock_info); + dss_sdi_init(dssdev->phy.sdi.datapairs); r = dss_sdi_enable(); if (r) diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 08ec1a7103f2..fc671d3d8004 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -1192,7 +1192,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green, break; if (regno < 16) { - u16 pal; + u32 pal; pal = ((red >> (16 - var->red.length)) << var->red.offset) | ((green >> (16 - var->green.length)) << diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c index 3f75129eb0a9..f7abbaeebcaf 100644 --- a/drivers/watchdog/da9052_wdt.c +++ b/drivers/watchdog/da9052_wdt.c @@ -21,7 +21,6 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/jiffies.h> -#include <linux/delay.h> #include <linux/mfd/da9052/reg.h> #include <linux/mfd/da9052/da9052.h> diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 1afb4fba11b4..4d519488d304 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, return ret; if (hwdev && hwdev->coherent_dma_mask) - dma_mask = hwdev->coherent_dma_mask; + dma_mask = dma_alloc_coherent_mask(hwdev, flags); phys = virt_to_phys(ret); dev_addr = xen_phys_to_bus(phys); diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 097e536e8672..03342728bf23 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev) if (err) goto config_release; - dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); - __pci_reset_function_locked(dev); - /* We need the device active to save the state. 
*/ dev_dbg(&dev->dev, "save state of device\n"); pci_save_state(dev); dev_data->pci_saved_state = pci_store_saved_state(dev); if (!dev_data->pci_saved_state) dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); - + else { + dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); + __pci_reset_function_locked(dev); + } /* Now disable the device (this also ensures some private device * data is setup before we export) */ diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index a256f3b2a845..ff6475f409d6 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1438,10 +1438,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, ret = extent_from_logical(fs_info, logical, path, &found_key); btrfs_release_path(path); - if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) - ret = -EINVAL; if (ret < 0) return ret; + if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) + return -EINVAL; extent_item_pos = logical - found_key.objectid; ret = iterate_extent_inodes(fs_info, found_key.objectid, diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 86eff48dab78..43d1c5a3a030 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -818,6 +818,7 @@ static void free_workspace(int type, struct list_head *workspace) btrfs_compress_op[idx]->free_workspace(workspace); atomic_dec(alloc_workspace); wake: + smp_mb(); if (waitqueue_active(workspace_wait)) wake_up(workspace_wait); } diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 9d7621f271ff..6d183f60d63a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -421,12 +421,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, spin_unlock(&fs_info->tree_mod_seq_lock); /* - * we removed the lowest blocker from the blocker list, so there may be - * more processible delayed refs. - */ - wake_up(&fs_info->tree_mod_seq_wait); - - /* * anything that's lower than the lowest existing (read: blocked) * sequence number can be removed from the tree. 
*/ @@ -631,6 +625,9 @@ __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) u32 nritems; int ret; + if (btrfs_header_level(eb) == 0) + return; + nritems = btrfs_header_nritems(eb); for (i = nritems - 1; i >= 0; i--) { ret = tree_mod_log_insert_key_locked(fs_info, eb, i, diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4bab807227ad..0d195b507660 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1252,7 +1252,6 @@ struct btrfs_fs_info { atomic_t tree_mod_seq; struct list_head tree_mod_seq_list; struct seq_list tree_mod_seq_elem; - wait_queue_head_t tree_mod_seq_wait; /* this protects tree_mod_log */ rwlock_t tree_mod_log_lock; @@ -3192,7 +3191,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u32 *dst); int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, - struct bio *bio, u64 logical_offset, u32 *dst); + struct bio *bio, u64 logical_offset); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 335605c8ceab..07d5eeb1e6f1 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -512,8 +512,8 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) rb_erase(&delayed_item->rb_node, root); delayed_item->delayed_node->count--; - atomic_dec(&delayed_root->items); - if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && + if (atomic_dec_return(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND && waitqueue_active(&delayed_root->wait)) wake_up(&delayed_root->wait); } @@ -1028,9 +1028,10 @@ do_again: btrfs_release_delayed_item(prev); ret = 0; btrfs_release_path(path); - if (curr) + if (curr) { + mutex_unlock(&node->mutex); goto do_again; - else + } else goto delete_fail; } @@ -1055,8 +1056,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) delayed_node->count--; delayed_root = delayed_node->root->fs_info->delayed_root; - atomic_dec(&delayed_root->items); - if (atomic_read(&delayed_root->items) < + if (atomic_dec_return(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && waitqueue_active(&delayed_root->wait)) wake_up(&delayed_root->wait); diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index da7419ed01bb..ae9411773397 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -38,17 +38,14 @@ static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, struct btrfs_delayed_tree_ref *ref1) { - if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) { - if (ref1->root < ref2->root) - return -1; - if (ref1->root > ref2->root) - return 1; - } else { - if (ref1->parent < ref2->parent) - return -1; - if (ref1->parent > ref2->parent) - return 1; - } + if (ref1->root < ref2->root) + return -1; + if (ref1->root > ref2->root) + return 1; + if (ref1->parent < ref2->parent) + return -1; + if (ref1->parent > ref2->parent) + return 1; return 0; } @@ -85,7 +82,8 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2, * type of the delayed backrefs and content of delayed backrefs. 
*/ static int comp_entry(struct btrfs_delayed_ref_node *ref2, - struct btrfs_delayed_ref_node *ref1) + struct btrfs_delayed_ref_node *ref1, + bool compare_seq) { if (ref1->bytenr < ref2->bytenr) return -1; @@ -102,10 +100,12 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2, if (ref1->type > ref2->type) return 1; /* merging of sequenced refs is not allowed */ - if (ref1->seq < ref2->seq) - return -1; - if (ref1->seq > ref2->seq) - return 1; + if (compare_seq) { + if (ref1->seq < ref2->seq) + return -1; + if (ref1->seq > ref2->seq) + return 1; + } if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), @@ -139,7 +139,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root, entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, rb_node); - cmp = comp_entry(entry, ins); + cmp = comp_entry(entry, ins, 1); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) @@ -233,6 +233,114 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, return 0; } +static void inline drop_delayed_ref(struct btrfs_trans_handle *trans, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_node *ref) +{ + rb_erase(&ref->rb_node, &delayed_refs->root); + ref->in_tree = 0; + btrfs_put_delayed_ref(ref); + delayed_refs->num_entries--; + if (trans->delayed_ref_updates) + trans->delayed_ref_updates--; +} + +static int merge_ref(struct btrfs_trans_handle *trans, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_node *ref, u64 seq) +{ + struct rb_node *node; + int merged = 0; + int mod = 0; + int done = 0; + + node = rb_prev(&ref->rb_node); + while (node) { + struct btrfs_delayed_ref_node *next; + + next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); + node = rb_prev(node); + if (next->bytenr != ref->bytenr) + break; + if (seq && next->seq >= seq) + break; + if (comp_entry(ref, next, 0)) + continue; + + if (ref->action == next->action) { + mod = next->ref_mod; + } else { + if (ref->ref_mod < next->ref_mod) { + struct btrfs_delayed_ref_node *tmp; + + tmp = ref; + ref = next; + next = tmp; + done = 1; + } + mod = -next->ref_mod; + } + + merged++; + drop_delayed_ref(trans, delayed_refs, next); + ref->ref_mod += mod; + if (ref->ref_mod == 0) { + drop_delayed_ref(trans, delayed_refs, ref); + break; + } else { + /* + * You can't have multiples of the same ref on a tree + * block. 
+ */ + WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY || + ref->type == BTRFS_SHARED_BLOCK_REF_KEY); + } + + if (done) + break; + node = rb_prev(&ref->rb_node); + } + + return merged; +} + +void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_head *head) +{ + struct rb_node *node; + u64 seq = 0; + + spin_lock(&fs_info->tree_mod_seq_lock); + if (!list_empty(&fs_info->tree_mod_seq_list)) { + struct seq_list *elem; + + elem = list_first_entry(&fs_info->tree_mod_seq_list, + struct seq_list, list); + seq = elem->seq; + } + spin_unlock(&fs_info->tree_mod_seq_lock); + + node = rb_prev(&head->node.rb_node); + while (node) { + struct btrfs_delayed_ref_node *ref; + + ref = rb_entry(node, struct btrfs_delayed_ref_node, + rb_node); + if (ref->bytenr != head->node.bytenr) + break; + + /* We can't merge refs that are outside of our seq count */ + if (seq && ref->seq >= seq) + break; + if (merge_ref(trans, delayed_refs, ref, seq)) + node = rb_prev(&head->node.rb_node); + else + node = rb_prev(node); + } +} + int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_root *delayed_refs, u64 seq) @@ -336,18 +444,11 @@ update_existing_ref(struct btrfs_trans_handle *trans, * every changing the extent allocation tree. */ existing->ref_mod--; - if (existing->ref_mod == 0) { - rb_erase(&existing->rb_node, - &delayed_refs->root); - existing->in_tree = 0; - btrfs_put_delayed_ref(existing); - delayed_refs->num_entries--; - if (trans->delayed_ref_updates) - trans->delayed_ref_updates--; - } else { + if (existing->ref_mod == 0) + drop_delayed_ref(trans, delayed_refs, existing); + else WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || existing->type == BTRFS_SHARED_BLOCK_REF_KEY); - } } else { WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || existing->type == BTRFS_SHARED_BLOCK_REF_KEY); @@ -662,9 +763,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, num_bytes, parent, ref_root, level, action, for_cow); - if (!need_ref_seq(for_cow, ref_root) && - waitqueue_active(&fs_info->tree_mod_seq_wait)) - wake_up(&fs_info->tree_mod_seq_wait); spin_unlock(&delayed_refs->lock); if (need_ref_seq(for_cow, ref_root)) btrfs_qgroup_record_ref(trans, &ref->node, extent_op); @@ -713,9 +811,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, num_bytes, parent, ref_root, owner, offset, action, for_cow); - if (!need_ref_seq(for_cow, ref_root) && - waitqueue_active(&fs_info->tree_mod_seq_wait)) - wake_up(&fs_info->tree_mod_seq_wait); spin_unlock(&delayed_refs->lock); if (need_ref_seq(for_cow, ref_root)) btrfs_qgroup_record_ref(trans, &ref->node, extent_op); @@ -744,8 +839,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, num_bytes, BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data); - if (waitqueue_active(&fs_info->tree_mod_seq_wait)) - wake_up(&fs_info->tree_mod_seq_wait); spin_unlock(&delayed_refs->lock); return 0; } diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 0d7c90c366b6..ab5300595847 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -167,6 +167,10 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, struct btrfs_delayed_extent_op *extent_op); +void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, + struct 
btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_head *head); struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 62e0cafd6e25..22e98e04c2ea 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -377,9 +377,13 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, ret = read_extent_buffer_pages(io_tree, eb, start, WAIT_COMPLETE, btree_get_extent, mirror_num); - if (!ret && !verify_parent_transid(io_tree, eb, + if (!ret) { + if (!verify_parent_transid(io_tree, eb, parent_transid, 0)) - break; + break; + else + ret = -EIO; + } /* * This buffer's crc is fine, but its contents are corrupted, so @@ -754,9 +758,7 @@ static void run_one_async_done(struct btrfs_work *work) limit = btrfs_async_submit_limit(fs_info); limit = limit * 2 / 3; - atomic_dec(&fs_info->nr_async_submits); - - if (atomic_read(&fs_info->nr_async_submits) < limit && + if (atomic_dec_return(&fs_info->nr_async_submits) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); @@ -2032,8 +2034,6 @@ int open_ctree(struct super_block *sb, fs_info->free_chunk_space = 0; fs_info->tree_mod_log = RB_ROOT; - init_waitqueue_head(&fs_info->tree_mod_seq_wait); - /* readahead state */ INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); spin_lock_init(&fs_info->reada_lock); @@ -2528,8 +2528,7 @@ retry_root_backup: goto fail_trans_kthread; /* do not make disk changes in broken FS */ - if (btrfs_super_log_root(disk_super) != 0 && - !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) { + if (btrfs_super_log_root(disk_super) != 0) { u64 bytenr = btrfs_super_log_root(disk_super); if (fs_devices->rw_devices == 0) { @@ -3189,30 +3188,14 @@ int close_ctree(struct btrfs_root *root) /* clear out the rbtree of defraggable inodes */ btrfs_run_defrag_inodes(fs_info); - /* - * Here come 2 situations when btrfs is broken to flip readonly: - * - * 1. when btrfs flips readonly somewhere else before - * btrfs_commit_super, sb->s_flags has MS_RDONLY flag, - * and btrfs will skip to write sb directly to keep - * ERROR state on disk. - * - * 2. when btrfs flips readonly just in btrfs_commit_super, - * and in such case, btrfs cannot write sb via btrfs_commit_super, - * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag, - * btrfs will cleanup all FS resources first and write sb then. 
- */ if (!(fs_info->sb->s_flags & MS_RDONLY)) { ret = btrfs_commit_super(root); if (ret) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } - if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { - ret = btrfs_error_commit_super(root); - if (ret) - printk(KERN_ERR "btrfs: commit super ret %d\n", ret); - } + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) + btrfs_error_commit_super(root); btrfs_put_block_group_cache(fs_info); @@ -3434,18 +3417,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, if (read_only) return 0; - if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { - printk(KERN_WARNING "warning: mount fs with errors, " - "running btrfsck is recommended\n"); - } - return 0; } -int btrfs_error_commit_super(struct btrfs_root *root) +void btrfs_error_commit_super(struct btrfs_root *root) { - int ret; - mutex_lock(&root->fs_info->cleaner_mutex); btrfs_run_delayed_iputs(root); mutex_unlock(&root->fs_info->cleaner_mutex); @@ -3455,10 +3431,6 @@ int btrfs_error_commit_super(struct btrfs_root *root) /* cleanup FS via transaction */ btrfs_cleanup_transaction(root); - - ret = write_ctree_super(NULL, root, 0); - - return ret; } static void btrfs_destroy_ordered_operations(struct btrfs_root *root) @@ -3782,14 +3754,17 @@ int btrfs_cleanup_transaction(struct btrfs_root *root) /* FIXME: cleanup wait for commit */ t->in_commit = 1; t->blocked = 1; + smp_mb(); if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) wake_up(&root->fs_info->transaction_blocked_wait); t->blocked = 0; + smp_mb(); if (waitqueue_active(&root->fs_info->transaction_wait)) wake_up(&root->fs_info->transaction_wait); t->commit_done = 1; + smp_mb(); if (waitqueue_active(&t->commit_wait)) wake_up(&t->commit_wait); diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 95e147eea239..c5b00a735fef 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -54,7 +54,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root *root, int max_mirrors); struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); int btrfs_commit_super(struct btrfs_root *root); -int btrfs_error_commit_super(struct btrfs_root *root); +void btrfs_error_commit_super(struct btrfs_root *root); struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 4e1b153b7c47..ba58024d40d3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2252,6 +2252,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, } /* + * We need to try and merge add/drops of the same ref since we + * can run into issues with relocate dropping the implicit ref + * and then it being added back again before the drop can + * finish. If we merged anything we need to re-loop so we can + * get a good ref. + */ + btrfs_merge_delayed_refs(trans, fs_info, delayed_refs, + locked_ref); + + /* * locked_ref is the head node, so we have to go one * node back for any delayed ref updates */ @@ -2318,12 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, ref->in_tree = 0; rb_erase(&ref->rb_node, &delayed_refs->root); delayed_refs->num_entries--; - /* - * we modified num_entries, but as we're currently running - * delayed refs, skip - * wake_up(&delayed_refs->seq_wait); - * here. 
- */ + if (locked_ref) { + /* + * when we play the delayed ref, also correct the + * ref_mod on head + */ + switch (ref->action) { + case BTRFS_ADD_DELAYED_REF: + case BTRFS_ADD_DELAYED_EXTENT: + locked_ref->node.ref_mod -= ref->ref_mod; + break; + case BTRFS_DROP_DELAYED_REF: + locked_ref->node.ref_mod += ref->ref_mod; + break; + default: + WARN_ON(1); + } + } spin_unlock(&delayed_refs->lock); ret = run_one_delayed_ref(trans, root, ref, extent_op, @@ -2350,22 +2371,6 @@ next: return count; } -static void wait_for_more_refs(struct btrfs_fs_info *fs_info, - struct btrfs_delayed_ref_root *delayed_refs, - unsigned long num_refs, - struct list_head *first_seq) -{ - spin_unlock(&delayed_refs->lock); - pr_debug("waiting for more refs (num %ld, first %p)\n", - num_refs, first_seq); - wait_event(fs_info->tree_mod_seq_wait, - num_refs != delayed_refs->num_entries || - fs_info->tree_mod_seq_list.next != first_seq); - pr_debug("done waiting for more refs (num %ld, first %p)\n", - delayed_refs->num_entries, fs_info->tree_mod_seq_list.next); - spin_lock(&delayed_refs->lock); -} - #ifdef SCRAMBLE_DELAYED_REFS /* * Normally delayed refs get processed in ascending bytenr order. This @@ -2460,13 +2465,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_node *ref; struct list_head cluster; - struct list_head *first_seq = NULL; int ret; u64 delayed_start; int run_all = count == (unsigned long)-1; int run_most = 0; - unsigned long num_refs = 0; - int consider_waiting; + int loops; /* We'll clean this up in btrfs_cleanup_transaction */ if (trans->aborted) @@ -2484,7 +2487,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, delayed_refs = &trans->transaction->delayed_refs; INIT_LIST_HEAD(&cluster); again: - consider_waiting = 0; + loops = 0; spin_lock(&delayed_refs->lock); #ifdef SCRAMBLE_DELAYED_REFS @@ -2512,31 +2515,6 @@ again: if (ret) break; - if (delayed_start >= delayed_refs->run_delayed_start) { - if (consider_waiting == 0) { - /* - * btrfs_find_ref_cluster looped. let's do one - * more cycle. if we don't run any delayed ref - * during that cycle (because we can't because - * all of them are blocked) and if the number of - * refs doesn't change, we avoid busy waiting. - */ - consider_waiting = 1; - num_refs = delayed_refs->num_entries; - first_seq = root->fs_info->tree_mod_seq_list.next; - } else { - wait_for_more_refs(root->fs_info, delayed_refs, - num_refs, first_seq); - /* - * after waiting, things have changed. we - * dropped the lock and someone else might have - * run some refs, built new clusters and so on. - * therefore, we restart staleness detection. - */ - consider_waiting = 0; - } - } - ret = run_clustered_refs(trans, root, &cluster); if (ret < 0) { spin_unlock(&delayed_refs->lock); @@ -2549,9 +2527,26 @@ again: if (count == 0) break; - if (ret || delayed_refs->run_delayed_start == 0) { + if (delayed_start >= delayed_refs->run_delayed_start) { + if (loops == 0) { + /* + * btrfs_find_ref_cluster looped. let's do one + * more cycle. if we don't run any delayed ref + * during that cycle (because we can't because + * all of them are blocked), bail out. 
+ */ + loops = 1; + } else { + /* + * no runnable refs left, stop trying + */ + BUG_ON(run_all); + break; + } + } + if (ret) { /* refs were run, let's reset staleness detection */ - consider_waiting = 0; + loops = 0; } } @@ -3007,17 +3002,16 @@ again: } spin_unlock(&block_group->lock); - num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024); + /* + * Try to preallocate enough space based on how big the block group is. + * Keep in mind this has to include any pinned space which could end up + * taking up quite a bit since it's not folded into the other space + * cache. + */ + num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024); if (!num_pages) num_pages = 1; - /* - * Just to make absolutely sure we have enough space, we're going to - * preallocate 12 pages worth of space for each block group. In - * practice we ought to use at most 8, but we need extra space so we can - * add our header and have a terminator between the extents and the - * bitmaps. - */ num_pages *= 16; num_pages *= PAGE_CACHE_SIZE; @@ -4571,8 +4565,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) if (root->fs_info->quota_enabled) { ret = btrfs_qgroup_reserve(root, num_bytes + nr_extents * root->leafsize); - if (ret) + if (ret) { + mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); return ret; + } } ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); @@ -5294,9 +5290,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, rb_erase(&head->node.rb_node, &delayed_refs->root); delayed_refs->num_entries--; - smp_mb(); - if (waitqueue_active(&root->fs_info->tree_mod_seq_wait)) - wake_up(&root->fs_info->tree_mod_seq_wait); /* * we don't take a ref on the node because we're removing it from the diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 45c81bb4ac82..4c878476bb91 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2330,23 +2330,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err) if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { ret = tree->ops->readpage_end_io_hook(page, start, end, state, mirror); - if (ret) { - /* no IO indicated but software detected errors - * in the block, either checksum errors or - * issues with the contents */ - struct btrfs_root *root = - BTRFS_I(page->mapping->host)->root; - struct btrfs_device *device; - + if (ret) uptodate = 0; - device = btrfs_find_device_for_logical( - root, start, mirror); - if (device) - btrfs_dev_stat_inc_and_print(device, - BTRFS_DEV_STAT_CORRUPTION_ERRS); - } else { + else clean_io_failure(start, page); - } } if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index b45b9de0c21d..857d93cd01dc 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -272,9 +272,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, } int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, - struct bio *bio, u64 offset, u32 *dst) + struct bio *bio, u64 offset) { - return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1); + return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1); } int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6e8f416773d4..ec154f954646 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1008,9 +1008,7 @@ static noinline void async_cow_submit(struct btrfs_work *work) nr_pages = (async_cow->end - 
async_cow->start + PAGE_CACHE_SIZE) >> PAGE_CACHE_SHIFT; - atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); - - if (atomic_read(&root->fs_info->async_delalloc_pages) < + if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) < 5 * 1024 * 1024 && waitqueue_active(&root->fs_info->async_submit_wait)) wake_up(&root->fs_info->async_submit_wait); @@ -1885,8 +1883,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) trans = btrfs_join_transaction_nolock(root); else trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) - return PTR_ERR(trans); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + trans = NULL; + goto out; + } trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode_fallback(trans, root, inode); if (ret) /* -ENOMEM or corruption */ @@ -3174,7 +3175,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, btrfs_i_size_write(dir, dir->i_size - name_len * 2); inode_inc_iversion(dir); dir->i_mtime = dir->i_ctime = CURRENT_TIME; - ret = btrfs_update_inode(trans, root, dir); + ret = btrfs_update_inode_fallback(trans, root, dir); if (ret) btrfs_abort_transaction(trans, root, ret); out: @@ -5774,18 +5775,112 @@ out: return ret; } +static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, + struct extent_state **cached_state, int writing) +{ + struct btrfs_ordered_extent *ordered; + int ret = 0; + + while (1) { + lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, + 0, cached_state); + /* + * We're concerned with the entire range that we're going to be + * doing DIO to, so we need to make sure theres no ordered + * extents in this range. + */ + ordered = btrfs_lookup_ordered_range(inode, lockstart, + lockend - lockstart + 1); + + /* + * We need to make sure there are no buffered pages in this + * range either, we could have raced between the invalidate in + * generic_file_direct_write and locking the extent. The + * invalidate needs to happen so that reads after a write do not + * get stale data. + */ + if (!ordered && (!writing || + !test_range_bit(&BTRFS_I(inode)->io_tree, + lockstart, lockend, EXTENT_UPTODATE, 0, + *cached_state))) + break; + + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, + cached_state, GFP_NOFS); + + if (ordered) { + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); + } else { + /* Screw you mmap */ + ret = filemap_write_and_wait_range(inode->i_mapping, + lockstart, + lockend); + if (ret) + break; + + /* + * If we found a page that couldn't be invalidated just + * fall back to buffered. 
+ */ + ret = invalidate_inode_pages2_range(inode->i_mapping, + lockstart >> PAGE_CACHE_SHIFT, + lockend >> PAGE_CACHE_SHIFT); + if (ret) + break; + } + + cond_resched(); + } + + return ret; +} + static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { struct extent_map *em; struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_state *cached_state = NULL; u64 start = iblock << inode->i_blkbits; + u64 lockstart, lockend; u64 len = bh_result->b_size; struct btrfs_trans_handle *trans; + int unlock_bits = EXTENT_LOCKED; + int ret; + + if (create) { + ret = btrfs_delalloc_reserve_space(inode, len); + if (ret) + return ret; + unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY; + } else { + len = min_t(u64, len, root->sectorsize); + } + + lockstart = start; + lockend = start + len - 1; + + /* + * If this errors out it's because we couldn't invalidate pagecache for + * this range and we need to fallback to buffered. + */ + if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create)) + return -ENOTBLK; + + if (create) { + ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, + lockend, EXTENT_DELALLOC, NULL, + &cached_state, GFP_NOFS); + if (ret) + goto unlock_err; + } em = btrfs_get_extent(inode, NULL, 0, start, len, 0); - if (IS_ERR(em)) - return PTR_ERR(em); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto unlock_err; + } /* * Ok for INLINE and COMPRESSED extents we need to fallback on buffered @@ -5804,17 +5899,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || em->block_start == EXTENT_MAP_INLINE) { free_extent_map(em); - return -ENOTBLK; + ret = -ENOTBLK; + goto unlock_err; } /* Just a good old fashioned hole, return */ if (!create && (em->block_start == EXTENT_MAP_HOLE || test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { free_extent_map(em); - /* DIO will do one hole at a time, so just unlock a sector */ - unlock_extent(&BTRFS_I(inode)->io_tree, start, - start + root->sectorsize - 1); - return 0; + ret = 0; + goto unlock_err; } /* @@ -5827,8 +5921,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * */ if (!create) { - len = em->len - (start - em->start); - goto map; + len = min(len, em->len - (start - em->start)); + lockstart = start + len; + goto unlock; } if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || @@ -5860,7 +5955,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, btrfs_end_transaction(trans, root); if (ret) { free_extent_map(em); - return ret; + goto unlock_err; } goto unlock; } @@ -5873,14 +5968,12 @@ must_cow: */ len = bh_result->b_size; em = btrfs_new_extent_direct(inode, em, start, len); - if (IS_ERR(em)) - return PTR_ERR(em); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto unlock_err; + } len = min(len, em->len - (start - em->start)); unlock: - clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, - EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, - 0, NULL, GFP_NOFS); -map: bh_result->b_blocknr = (em->block_start + (start - em->start)) >> inode->i_blkbits; bh_result->b_size = len; @@ -5898,9 +5991,44 @@ map: i_size_write(inode, start + len); } + /* + * In the case of write we need to clear and unlock the entire range, + * in the case of read we need to unlock only the end area that we + * aren't using if there is any left over space. 
+ */ + if (lockstart < lockend) { + if (create && len < lockend - lockstart) { + clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, + lockstart + len - 1, unlock_bits, 1, 0, + &cached_state, GFP_NOFS); + /* + * Beside unlock, we also need to cleanup reserved space + * for the left range by attaching EXTENT_DO_ACCOUNTING. + */ + clear_extent_bit(&BTRFS_I(inode)->io_tree, + lockstart + len, lockend, + unlock_bits | EXTENT_DO_ACCOUNTING, + 1, 0, NULL, GFP_NOFS); + } else { + clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, + lockend, unlock_bits, 1, 0, + &cached_state, GFP_NOFS); + } + } else { + free_extent_state(cached_state); + } + free_extent_map(em); return 0; + +unlock_err: + if (create) + unlock_bits |= EXTENT_DO_ACCOUNTING; + + clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, + unlock_bits, 1, 0, &cached_state, GFP_NOFS); + return ret; } struct btrfs_dio_private { @@ -5908,7 +6036,6 @@ struct btrfs_dio_private { u64 logical_offset; u64 disk_bytenr; u64 bytes; - u32 *csums; void *private; /* number of bios pending for this dio */ @@ -5928,7 +6055,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) struct inode *inode = dip->inode; struct btrfs_root *root = BTRFS_I(inode)->root; u64 start; - u32 *private = dip->csums; start = dip->logical_offset; do { @@ -5936,8 +6062,12 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) struct page *page = bvec->bv_page; char *kaddr; u32 csum = ~(u32)0; + u64 private = ~(u32)0; unsigned long flags; + if (get_state_private(&BTRFS_I(inode)->io_tree, + start, &private)) + goto failed; local_irq_save(flags); kaddr = kmap_atomic(page); csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, @@ -5947,18 +6077,18 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) local_irq_restore(flags); flush_dcache_page(bvec->bv_page); - if (csum != *private) { + if (csum != private) { +failed: printk(KERN_ERR "btrfs csum failed ino %llu off" " %llu csum %u private %u\n", (unsigned long long)btrfs_ino(inode), (unsigned long long)start, - csum, *private); + csum, (unsigned)private); err = -EIO; } } start += bvec->bv_len; - private++; bvec++; } while (bvec <= bvec_end); @@ -5966,7 +6096,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) dip->logical_offset + dip->bytes - 1); bio->bi_private = dip->private; - kfree(dip->csums); kfree(dip); /* If we had a csum failure make sure to clear the uptodate flag */ @@ -6072,7 +6201,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, int rw, u64 file_offset, int skip_sum, - u32 *csums, int async_submit) + int async_submit) { int write = rw & REQ_WRITE; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -6105,8 +6234,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, if (ret) goto err; } else if (!skip_sum) { - ret = btrfs_lookup_bio_sums_dio(root, inode, bio, - file_offset, csums); + ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset); if (ret) goto err; } @@ -6132,10 +6260,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, u64 submit_len = 0; u64 map_length; int nr_pages = 0; - u32 *csums = dip->csums; int ret = 0; int async_submit = 0; - int write = rw & REQ_WRITE; map_length = orig_bio->bi_size; ret = btrfs_map_block(map_tree, READ, start_sector << 9, @@ -6171,16 +6297,13 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, atomic_inc(&dip->pending_bios); ret = 
__btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums, async_submit); + async_submit); if (ret) { bio_put(bio); atomic_dec(&dip->pending_bios); goto out_err; } - /* Write's use the ordered csums */ - if (!write && !skip_sum) - csums = csums + nr_pages; start_sector += submit_len >> 9; file_offset += submit_len; @@ -6210,7 +6333,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, submit: ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums, async_submit); + async_submit); if (!ret) return 0; @@ -6246,17 +6369,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, ret = -ENOMEM; goto free_ordered; } - dip->csums = NULL; - - /* Write's use the ordered csum stuff, so we don't need dip->csums */ - if (!write && !skip_sum) { - dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); - if (!dip->csums) { - kfree(dip); - ret = -ENOMEM; - goto free_ordered; - } - } dip->private = bio->bi_private; dip->inode = inode; @@ -6341,132 +6453,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io out: return retval; } + static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - struct btrfs_ordered_extent *ordered; - struct extent_state *cached_state = NULL; - u64 lockstart, lockend; - ssize_t ret; - int writing = rw & WRITE; - int write_bits = 0; - size_t count = iov_length(iov, nr_segs); if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, - offset, nr_segs)) { + offset, nr_segs)) return 0; - } - - lockstart = offset; - lockend = offset + count - 1; - - if (writing) { - ret = btrfs_delalloc_reserve_space(inode, count); - if (ret) - goto out; - } - - while (1) { - lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, - 0, &cached_state); - /* - * We're concerned with the entire range that we're going to be - * doing DIO to, so we need to make sure theres no ordered - * extents in this range. - */ - ordered = btrfs_lookup_ordered_range(inode, lockstart, - lockend - lockstart + 1); - - /* - * We need to make sure there are no buffered pages in this - * range either, we could have raced between the invalidate in - * generic_file_direct_write and locking the extent. The - * invalidate needs to happen so that reads after a write do not - * get stale data. - */ - if (!ordered && (!writing || - !test_range_bit(&BTRFS_I(inode)->io_tree, - lockstart, lockend, EXTENT_UPTODATE, 0, - cached_state))) - break; - - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, - &cached_state, GFP_NOFS); - - if (ordered) { - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - } else { - /* Screw you mmap */ - ret = filemap_write_and_wait_range(file->f_mapping, - lockstart, - lockend); - if (ret) - goto out; - - /* - * If we found a page that couldn't be invalidated just - * fall back to buffered. 
- */ - ret = invalidate_inode_pages2_range(file->f_mapping, - lockstart >> PAGE_CACHE_SHIFT, - lockend >> PAGE_CACHE_SHIFT); - if (ret) { - if (ret == -EBUSY) - ret = 0; - goto out; - } - } - - cond_resched(); - } - /* - * we don't use btrfs_set_extent_delalloc because we don't want - * the dirty or uptodate bits - */ - if (writing) { - write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; - ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, - EXTENT_DELALLOC, NULL, &cached_state, - GFP_NOFS); - if (ret) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, - lockend, EXTENT_LOCKED | write_bits, - 1, 0, &cached_state, GFP_NOFS); - goto out; - } - } - - free_extent_state(cached_state); - cached_state = NULL; - - ret = __blockdev_direct_IO(rw, iocb, inode, + return __blockdev_direct_IO(rw, iocb, inode, BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, btrfs_submit_direct, 0); - - if (ret < 0 && ret != -EIOCBQUEUED) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, - offset + iov_length(iov, nr_segs) - 1, - EXTENT_LOCKED | write_bits, 1, 0, - &cached_state, GFP_NOFS); - } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { - /* - * We're falling back to buffered, unlock the section we didn't - * do IO on. - */ - clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, - offset + iov_length(iov, nr_segs) - 1, - EXTENT_LOCKED | write_bits, 1, 0, - &cached_state, GFP_NOFS); - } -out: - free_extent_state(cached_state); - return ret; } static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 7bb755677a22..9df50fa8a078 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -424,7 +424,7 @@ static noinline int create_subvol(struct btrfs_root *root, uuid_le_gen(&new_uuid); memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE); root_item.otime.sec = cpu_to_le64(cur_time.tv_sec); - root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec); + root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec); root_item.ctime = root_item.otime; btrfs_set_root_ctransid(&root_item, trans->transid); btrfs_set_root_otransid(&root_item, trans->transid); diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index a44eff074805..2a1762c66041 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -67,7 +67,7 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) { if (eb->lock_nested) { read_lock(&eb->lock); - if (&eb->lock_nested && current->pid == eb->lock_owner) { + if (eb->lock_nested && current->pid == eb->lock_owner) { read_unlock(&eb->lock); return; } diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index bc424ae5a81a..38b42e7bc91d 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1364,13 +1364,17 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, spin_lock(&fs_info->qgroup_lock); dstgroup = add_qgroup_rb(fs_info, objectid); - if (!dstgroup) + if (IS_ERR(dstgroup)) { + ret = PTR_ERR(dstgroup); goto unlock; + } if (srcid) { srcgroup = find_qgroup_rb(fs_info, srcid); - if (!srcgroup) + if (!srcgroup) { + ret = -EINVAL; goto unlock; + } dstgroup->rfer = srcgroup->rfer - level_size; dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size; srcgroup->excl = level_size; @@ -1379,8 +1383,10 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, qgroup_dirty(fs_info, srcgroup); } - if (!inherit) + if (!inherit) { + ret = -EINVAL; goto unlock; + } i_qgroups = (u64 *)(inherit + 1); for (i = 0; i < inherit->num_qgroups; ++i) { 
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6bb465cca20f..10d8e4d88071 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -544,8 +544,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans, struct timespec ct = CURRENT_TIME; spin_lock(&root->root_times_lock); - item->ctransid = trans->transid; + item->ctransid = cpu_to_le64(trans->transid); item->ctime.sec = cpu_to_le64(ct.tv_sec); - item->ctime.nsec = cpu_to_le64(ct.tv_nsec); + item->ctime.nsec = cpu_to_le32(ct.tv_nsec); spin_unlock(&root->root_times_lock); } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f2eb24c477a3..83d6f9f9c220 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -838,7 +838,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait) struct btrfs_trans_handle *trans; struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root = fs_info->tree_root; - int ret; trace_btrfs_sync_fs(wait); @@ -849,11 +848,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait) btrfs_wait_ordered_extents(root, 0, 0); - trans = btrfs_start_transaction(root, 0); + spin_lock(&fs_info->trans_lock); + if (!fs_info->running_transaction) { + spin_unlock(&fs_info->trans_lock); + return 0; + } + spin_unlock(&fs_info->trans_lock); + + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); - ret = btrfs_commit_transaction(trans, root); - return ret; + return btrfs_commit_transaction(trans, root); } static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) @@ -1530,6 +1535,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root) while (cur_devices) { head = &cur_devices->devices; list_for_each_entry(dev, head, dev_list) { + if (dev->missing) + continue; if (!first_dev || dev->devid < first_dev->devid) first_dev = dev; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 17be3dedacba..27c26004e050 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1031,6 +1031,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, btrfs_i_size_write(parent_inode, parent_inode->i_size + dentry->d_name.len * 2); + parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; ret = btrfs_update_inode(trans, parent_root, parent_inode); if (ret) goto abort_trans_dput; @@ -1066,7 +1067,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, memcpy(new_root_item->parent_uuid, root->root_item.uuid, BTRFS_UUID_SIZE); new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec); - new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec); + new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec); btrfs_set_root_otransid(new_root_item, trans->transid); memset(&new_root_item->stime, 0, sizeof(new_root_item->stime)); memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime)); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e86ae04abe6a..88b969aeeb71 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -227,9 +227,8 @@ loop_lock: cur = pending; pending = pending->bi_next; cur->bi_next = NULL; - atomic_dec(&fs_info->nr_async_bios); - if (atomic_read(&fs_info->nr_async_bios) < limit && + if (atomic_dec_return(&fs_info->nr_async_bios) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); @@ -569,9 +568,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) memcpy(new_device, device, sizeof(*new_device)); /* Safe because we are under uuid_mutex */ - name = rcu_string_strdup(device->name->str, 
GFP_NOFS); - BUG_ON(device->name && !name); /* -ENOMEM */ - rcu_assign_pointer(new_device->name, name); + if (device->name) { + name = rcu_string_strdup(device->name->str, GFP_NOFS); + BUG_ON(device->name && !name); /* -ENOMEM */ + rcu_assign_pointer(new_device->name, name); + } new_device->bdev = NULL; new_device->writeable = 0; new_device->in_fs_metadata = 0; @@ -4605,28 +4606,6 @@ int btrfs_read_sys_array(struct btrfs_root *root) return ret; } -struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root, - u64 logical, int mirror_num) -{ - struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; - int ret; - u64 map_length = 0; - struct btrfs_bio *bbio = NULL; - struct btrfs_device *device; - - BUG_ON(mirror_num == 0); - ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio, - mirror_num); - if (ret) { - BUG_ON(bbio != NULL); - return NULL; - } - BUG_ON(mirror_num != bbio->mirror_num); - device = bbio->stripes[mirror_num - 1].dev; - kfree(bbio); - return device; -} - int btrfs_read_chunk_tree(struct btrfs_root *root) { struct btrfs_path *path; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 5479325987b3..53c06af92e8d 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -289,8 +289,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info); int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, u64 *start, u64 *max_avail); -struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root, - u64 logical, int mirror_num); void btrfs_dev_stat_print_on_error(struct btrfs_device *device); void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); int btrfs_get_dev_stats(struct btrfs_root *root, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 074923ce593d..f0cf934ba877 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1576,9 +1576,14 @@ cifs_readv_callback(struct mid_q_entry *mid) /* result already set, check signature */ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { - if (cifs_verify_signature(rdata->iov, rdata->nr_iov, - server, mid->sequence_number + 1)) - cERROR(1, "Unexpected SMB signature"); + int rc = 0; + + rc = cifs_verify_signature(rdata->iov, rdata->nr_iov, + server, + mid->sequence_number + 1); + if (rc) + cERROR(1, "SMB signature verification returned " + "error = %d", rc); } /* FIXME: should this be counted toward the initiating task? */ task_io_account_read(rdata->bytes); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index cbe709ad6663..781025be48bc 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -356,19 +356,12 @@ cifs_create_get_file_info: cifs_create_set_dentry: if (rc != 0) { cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); + CIFSSMBClose(xid, tcon, *fileHandle); goto out; } d_drop(direntry); d_add(direntry, newinode); - /* ENOENT for create? How weird... 
*/ - rc = -ENOENT; - if (!newinode) { - CIFSSMBClose(xid, tcon, *fileHandle); - goto out; - } - rc = 0; - out: kfree(buf); kfree(full_path); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 7354877fa3bd..cb79c7edecb0 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -124,10 +124,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) { struct cifsInodeInfo *cifs_i = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); - unsigned long oldtime = cifs_i->time; cifs_revalidate_cache(inode, fattr); + spin_lock(&inode->i_lock); inode->i_atime = fattr->cf_atime; inode->i_mtime = fattr->cf_mtime; inode->i_ctime = fattr->cf_ctime; @@ -148,9 +148,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) else cifs_i->time = jiffies; - cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode, - oldtime, cifs_i->time); - cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; cifs_i->server_eof = fattr->cf_eof; @@ -158,7 +155,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) * Can't safely change the file size here if the client is writing to * it due to potential races. */ - spin_lock(&inode->i_lock); if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) { i_size_write(inode, fattr->cf_eof); @@ -859,12 +855,14 @@ struct inode *cifs_root_iget(struct super_block *sb) if (rc && tcon->ipc) { cFYI(1, "ipc connection - fake read inode"); + spin_lock(&inode->i_lock); inode->i_mode |= S_IFDIR; set_nlink(inode, 2); inode->i_op = &cifs_ipc_inode_ops; inode->i_fop = &simple_dir_operations; inode->i_uid = cifs_sb->mnt_uid; inode->i_gid = cifs_sb->mnt_gid; + spin_unlock(&inode->i_lock); } else if (rc) { iget_failed(inode); inode = ERR_PTR(rc); @@ -1110,6 +1108,15 @@ undo_setattr: goto out_close; } +/* copied from fs/nfs/dir.c with small changes */ +static void +cifs_drop_nlink(struct inode *inode) +{ + spin_lock(&inode->i_lock); + if (inode->i_nlink > 0) + drop_nlink(inode); + spin_unlock(&inode->i_lock); +} /* * If dentry->d_inode is null (usually meaning the cached dentry @@ -1166,13 +1173,13 @@ retry_std_delete: psx_del_no_retry: if (!rc) { if (inode) - drop_nlink(inode); + cifs_drop_nlink(inode); } else if (rc == -ENOENT) { d_drop(dentry); } else if (rc == -ETXTBSY) { rc = cifs_rename_pending_delete(full_path, dentry, xid); if (rc == 0) - drop_nlink(inode); + cifs_drop_nlink(inode); } else if ((rc == -EACCES) && (dosattr == 0) && inode) { attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); if (attrs == NULL) { @@ -1241,9 +1248,10 @@ cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode, * setting nlink not necessary except in cases where we failed to get it * from the server or was set bogus */ + spin_lock(&dentry->d_inode->i_lock); if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2)) set_nlink(dentry->d_inode, 2); - + spin_unlock(&dentry->d_inode->i_lock); mode &= ~current_umask(); /* must turn on setgid bit if parent dir has it */ if (inode->i_mode & S_ISGID) diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 09e4b3ae4564..e6ce3b112875 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -433,7 +433,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, if (old_file->d_inode) { cifsInode = CIFS_I(old_file->d_inode); if (rc == 0) { + spin_lock(&old_file->d_inode->i_lock); inc_nlink(old_file->d_inode); + spin_unlock(&old_file->d_inode->i_lock); /* BB should we make this contingent on superblock flag NOATIME? 
*/ /* old_file->d_inode->i_ctime = CURRENT_TIME;*/ /* parent dir timestamps will update from srv diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index a4ff5d547554..e4d3b9964167 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -52,7 +52,8 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid) cERROR(1, "Bad protocol string signature header %x", *(unsigned int *) hdr->ProtocolId); if (mid != hdr->MessageId) - cERROR(1, "Mids do not match"); + cERROR(1, "Mids do not match: %llu and %llu", mid, + hdr->MessageId); } cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId); return 1; @@ -107,7 +108,7 @@ smb2_check_message(char *buf, unsigned int length) * ie Validate the wct via smb2_struct_sizes table above */ - if (length < 2 + sizeof(struct smb2_hdr)) { + if (length < sizeof(struct smb2_pdu)) { if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) { pdu->StructureSize2 = 0; /* @@ -121,15 +122,15 @@ smb2_check_message(char *buf, unsigned int length) return 1; } if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) { - cERROR(1, "SMB length greater than maximum, mid=%lld", mid); + cERROR(1, "SMB length greater than maximum, mid=%llu", mid); return 1; } if (check_smb2_hdr(hdr, mid)) return 1; - if (hdr->StructureSize != SMB2_HEADER_SIZE) { - cERROR(1, "Illegal structure size %d", + if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { + cERROR(1, "Illegal structure size %u", le16_to_cpu(hdr->StructureSize)); return 1; } @@ -161,8 +162,9 @@ smb2_check_message(char *buf, unsigned int length) if (4 + len != clc_len) { cFYI(1, "Calculated size %u length %u mismatch mid %llu", clc_len, 4 + len, mid); - if (clc_len == 4 + len + 1) /* BB FIXME (fix samba) */ - return 0; /* BB workaround Samba 3 bug SessSetup rsp */ + /* server can return one byte more */ + if (clc_len == 4 + len + 1) + return 0; return 1; } return 0; diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index f37a1b41b402..c5fbfac5d576 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -87,10 +87,6 @@ #define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe) -#define SMB2_HEADER_SIZE __constant_le16_to_cpu(64) - -#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9) - /* * SMB2 Header Definition * @@ -99,6 +95,9 @@ * "PDU" : "Protocol Data Unit" (ie a network "frame") * */ + +#define SMB2_HEADER_STRUCTURE_SIZE __constant_le16_to_cpu(64) + struct smb2_hdr { __be32 smb2_buf_length; /* big endian on wire */ /* length is only two or three bytes - with @@ -140,6 +139,9 @@ struct smb2_pdu { * command code name for the struct. Note that structures must be packed. * */ + +#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9) + struct smb2_err_rsp { struct smb2_hdr hdr; __le16 StructureSize; diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 83867ef348df..d9b639b95fa8 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -503,13 +503,16 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, /* convert the length into a more usable form */ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { struct kvec iov; + int rc = 0; iov.iov_base = mid->resp_buf; iov.iov_len = len; /* FIXME: add code to kill session */ - if (cifs_verify_signature(&iov, 1, server, - mid->sequence_number + 1) != 0) - cERROR(1, "Unexpected SMB signature"); + rc = cifs_verify_signature(&iov, 1, server, + mid->sequence_number + 1); + if (rc) + cERROR(1, "SMB signature verification returned error = " + "%d", rc); } /* BB special case reconnect tid and uid here? 
*/ diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index df0de27c2733..e784a217b500 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c @@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) struct completion complete; bio_init(&bio); + bio.bi_max_vecs = 1; bio.bi_io_vec = &bio_vec; bio_vec.bv_page = page; bio_vec.bv_len = PAGE_SIZE; @@ -95,12 +96,11 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, struct address_space *mapping = super->s_mapping_inode->i_mapping; struct bio *bio; struct page *page; - struct request_queue *q = bdev_get_queue(sb->s_bdev); - unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); + unsigned int max_pages; int i; - if (max_pages > BIO_MAX_PAGES) - max_pages = BIO_MAX_PAGES; + max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); + bio = bio_alloc(GFP_NOFS, max_pages); BUG_ON(!bio); @@ -190,12 +190,11 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, { struct logfs_super *super = logfs_super(sb); struct bio *bio; - struct request_queue *q = bdev_get_queue(sb->s_bdev); - unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); + unsigned int max_pages; int i; - if (max_pages > BIO_MAX_PAGES) - max_pages = BIO_MAX_PAGES; + max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); + bio = bio_alloc(GFP_NOFS, max_pages); BUG_ON(!bio); diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c index a422f42238b2..6984562738d3 100644 --- a/fs/logfs/inode.c +++ b/fs/logfs/inode.c @@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode) call_rcu(&inode->i_rcu, logfs_i_callback); } +static void __logfs_destroy_meta_inode(struct inode *inode) +{ + struct logfs_inode *li = logfs_inode(inode); + BUG_ON(li->li_block); + call_rcu(&inode->i_rcu, logfs_i_callback); +} + static void logfs_destroy_inode(struct inode *inode) { struct logfs_inode *li = logfs_inode(inode); + if (inode->i_ino < LOGFS_RESERVED_INOS) { + /* + * The reserved inodes are never destroyed unless we are in + * unmont path. 
+ */ + __logfs_destroy_meta_inode(inode); + return; + } + BUG_ON(list_empty(&li->li_freeing_list)); spin_lock(&logfs_inode_lock); li->li_refcount--; @@ -373,8 +389,8 @@ static void logfs_put_super(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); /* kill the meta-inodes */ - iput(super->s_master_inode); iput(super->s_segfile_inode); + iput(super->s_master_inode); iput(super->s_mapping_inode); } diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c index 1e1c369df22b..2a09b8d73989 100644 --- a/fs/logfs/journal.c +++ b/fs/logfs/journal.c @@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area, index = ofs >> PAGE_SHIFT; page_ofs = ofs & (PAGE_SIZE - 1); - page = find_lock_page(mapping, index); + page = find_or_create_page(mapping, index, GFP_NOFS); BUG_ON(!page); memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize); unlock_page(page); diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index f1cb512c5019..5be0abef603d 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c @@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode) return; } - BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS); page = inode_to_page(inode); BUG_ON(!page); /* FIXME: Use emergency page */ logfs_put_write_page(page); diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index e28d090c98d6..038da0991794 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c @@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb) static void map_invalidatepage(struct page *page, unsigned long l) { - BUG(); + return; } static int map_releasepage(struct page *page, gfp_t g) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index ced362533e3c..bfacf0d5a225 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -118,7 +118,8 @@ enum drm_mode_status { .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ - .vscan = (vs), .flags = (f), .vrefresh = 0 + .vscan = (vs), .flags = (f), .vrefresh = 0, \ + .base.type = DRM_MODE_OBJECT_MODE #define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index 5581980b14f6..3d6301b6ec16 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h @@ -359,8 +359,9 @@ struct drm_mode_mode_cmd { struct drm_mode_modeinfo mode; }; -#define DRM_MODE_CURSOR_BO (1<<0) -#define DRM_MODE_CURSOR_MOVE (1<<1) +#define DRM_MODE_CURSOR_BO 0x01 +#define DRM_MODE_CURSOR_MOVE 0x02 +#define DRM_MODE_CURSOR_FLAGS 0x03 /* * depending on the value in flags different members are used. diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 604382143bcf..594b419b7d20 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -82,10 +82,18 @@ __x - (__x % (y)); \ } \ ) + +/* + * Divide positive or negative dividend by positive divisor and round + * to closest integer. Result is undefined for negative divisors. + */ #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ - typeof(divisor) __divisor = divisor; \ - (((x) + ((__divisor) / 2)) / (__divisor)); \ + typeof(x) __x = x; \ + typeof(divisor) __d = divisor; \ + (((typeof(x))-1) >= 0 || (__x) >= 0) ? 
\ + (((__x) + ((__d) / 2)) / (__d)) : \ + (((__x) - ((__d) / 2)) / (__d)); \ } \ ) diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 111aca5e97f3..4b27f9f503e4 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -239,6 +239,7 @@ struct mmc_card { #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ +#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ /* byte mode */ unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ #define MMC_NO_POWER_NOTIFICATION 0 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index fc3526077348..6b4565c440c8 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2149,7 +2149,7 @@ #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 #define PCI_DEVICE_ID_NX2_5706S 0x16aa -#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab +#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4 #define PCI_DEVICE_ID_NX2_5708S 0x16ac #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae diff --git a/include/linux/time.h b/include/linux/time.h index b0bbd8f0130d..b51e664c83e7 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -125,6 +125,13 @@ static inline bool timespec_valid(const struct timespec *ts) /* Can't have more nanoseconds then a second */ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) return false; + return true; +} + +static inline bool timespec_valid_strict(const struct timespec *ts) +{ + if (!timespec_valid(ts)) + return false; /* Disallow values that could overflow ktime_t */ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) return false; diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 802de56c41e8..2f322c38bd4d 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -136,6 +136,22 @@ static inline void tracepoint_synchronize_unregister(void) postrcu; \ } while (0) +#ifndef MODULE +#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \ + static inline void trace_##name##_rcuidle(proto) \ + { \ + if (static_key_false(&__tracepoint_##name.key)) \ + __DO_TRACE(&__tracepoint_##name, \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args), \ + TP_CONDITION(cond), \ + rcu_idle_exit(), \ + rcu_idle_enter()); \ + } +#else +#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) +#endif + /* * Make sure the alignment of the structure in the __tracepoints section will * not add unwanted padding between the beginning of the section and the @@ -151,16 +167,8 @@ static inline void tracepoint_synchronize_unregister(void) TP_ARGS(data_args), \ TP_CONDITION(cond),,); \ } \ - static inline void trace_##name##_rcuidle(proto) \ - { \ - if (static_key_false(&__tracepoint_##name.key)) \ - __DO_TRACE(&__tracepoint_##name, \ - TP_PROTO(data_proto), \ - TP_ARGS(data_args), \ - TP_CONDITION(cond), \ - rcu_idle_exit(), \ - rcu_idle_enter()); \ - } \ + __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ + PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ static inline int \ register_trace_##name(void (*probe)(data_proto), void *data) \ { \ diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index e1ce1048fe5f..4a045cda9c60 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ 
b/include/net/netfilter/nf_conntrack_ecache.h @@ -18,6 +18,7 @@ struct nf_conntrack_ecache { u16 ctmask; /* bitmask of ct events to be delivered */ u16 expmask; /* bitmask of expect events to be delivered */ u32 pid; /* netlink pid of destroyer */ + struct timer_list timeout; }; static inline struct nf_conntrack_ecache * diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 0c1485e42be6..34e5eac81424 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -428,7 +428,7 @@ int do_settimeofday(const struct timespec *tv) struct timespec ts_delta, xt; unsigned long flags; - if (!timespec_valid(tv)) + if (!timespec_valid_strict(tv)) return -EINVAL; write_seqlock_irqsave(&tk->lock, flags); @@ -476,7 +476,7 @@ int timekeeping_inject_offset(struct timespec *ts) /* Make sure the proposed value is valid */ tmp = timespec_add(tk_xtime(tk), *ts); - if (!timespec_valid(&tmp)) { + if (!timespec_valid_strict(&tmp)) { ret = -EINVAL; goto error; } @@ -659,7 +659,7 @@ void __init timekeeping_init(void) struct timespec now, boot, tmp; read_persistent_clock(&now); - if (!timespec_valid(&now)) { + if (!timespec_valid_strict(&now)) { pr_warn("WARNING: Persistent clock returned invalid value!\n" " Check your CMOS/BIOS settings.\n"); now.tv_sec = 0; @@ -667,7 +667,7 @@ void __init timekeeping_init(void) } read_boot_clock(&boot); - if (!timespec_valid(&boot)) { + if (!timespec_valid_strict(&boot)) { pr_warn("WARNING: Boot clock returned invalid value!\n" " Check your CMOS/BIOS settings.\n"); boot.tv_sec = 0; @@ -713,7 +713,7 @@ static struct timespec timekeeping_suspend_time; static void __timekeeping_inject_sleeptime(struct timekeeper *tk, struct timespec *delta) { - if (!timespec_valid(delta)) { + if (!timespec_valid_strict(delta)) { printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " "sleep delta value!\n"); return; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index bd92431d4c49..4ada3be6e252 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2562,7 +2562,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) break; default: - BUG(); + return -EINVAL; } l = strlen(policy_modes[mode]); diff --git a/mm/slab.c b/mm/slab.c index f8b0d539b482..811af03a14ef 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3260,6 +3260,7 @@ force_grow: /* cache_grow can reenable interrupts, then ac could change. */ ac = cpu_cache_get(cachep); + node = numa_mem_id(); /* no objects in sight? 
abort */ if (!x && (ac->avail == 0 || force_refill)) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 346b1eb83a1f..e4ba3e70c174 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev) struct napi_struct *napi; int budget = 16; - WARN_ON_ONCE(!irqs_disabled()); - list_for_each_entry(napi, &dev->napi_list, dev_list) { - local_irq_enable(); if (napi->poll_owner != smp_processor_id() && spin_trylock(&napi->poll_lock)) { - rcu_read_lock_bh(); budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), napi, budget); - rcu_read_unlock_bh(); spin_unlock(&napi->poll_lock); - if (!budget) { - local_irq_disable(); + if (!budget) break; - } } - local_irq_disable(); } } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 8eec8f4a0536..ebdf06f938bf 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; static struct mr_table *ipmr_new_table(struct net *net, u32 id); +static void ipmr_free_table(struct mr_table *mrt); + static int ip_mr_forward(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *cache, int local); @@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); +static void mroute_clean_tables(struct mr_table *mrt); static void ipmr_expire_process(unsigned long arg); #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES @@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net) list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { list_del(&mrt->list); - kfree(mrt); + ipmr_free_table(mrt); } fib_rules_unregister(net->ipv4.mr_rules_ops); } @@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net) static void __net_exit ipmr_rules_exit(struct net *net) { - kfree(net->ipv4.mrt); + ipmr_free_table(net->ipv4.mrt); } #endif @@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) return mrt; } +static void ipmr_free_table(struct mr_table *mrt) +{ + del_timer_sync(&mrt->ipmr_expire_timer); + mroute_clean_tables(mrt); + kfree(mrt); +} + /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index 4ad9cf173992..9c87cde28ff8 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff, ret = nf_ct_expect_related(rtcp_exp); if (ret == 0) break; - else if (ret != -EBUSY) { + else if (ret == -EBUSY) { + nf_ct_unexpect_related(rtp_exp); + continue; + } else if (ret < 0) { nf_ct_unexpect_related(rtp_exp); port = 0; break; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index fd9ecb52c66b..82cf2a722b23 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) if (mtu < ip_rt_min_pmtu) mtu = ip_rt_min_pmtu; + rcu_read_lock(); if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { struct fib_nh *nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, 0, mtu, jiffies + ip_rt_mtu_expires); } + rcu_read_unlock(); return mtu; } @@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct 
dst_entry *dst, struct sock *sk, dst->obsolete = DST_OBSOLETE_KILL; } else { rt->rt_pmtu = mtu; - dst_set_expires(&rt->dst, ip_rt_mtu_expires); + rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires); } } @@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst) { struct rtable *rt = (struct rtable *) dst; - if (dst->flags & DST_NOCACHE) { + if (!list_empty(&rt->rt_uncached)) { spin_lock_bh(&rt_uncached_lock); list_del(&rt->rt_uncached); spin_unlock_bh(&rt_uncached_lock); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 85308b90df80..6e38c6c23caa 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) * tcp_xmit_retransmit_queue(). */ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, - int newly_acked_sacked, bool is_dupack, + int prior_sacked, bool is_dupack, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); + int newly_acked_sacked = 0; int fast_rexmit = 0; if (WARN_ON(!tp->packets_out && tp->sacked_out)) @@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, tcp_add_reno_sack(sk); } else do_lost = tcp_try_undo_partial(sk, pkts_acked); + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; break; case TCP_CA_Loss: if (flag & FLAG_DATA_ACKED) @@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, if (is_dupack) tcp_add_reno_sack(sk); } + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); @@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) int prior_packets; int prior_sacked = tp->sacked_out; int pkts_acked = 0; - int newly_acked_sacked = 0; bool frto_cwnd = false; /* If the ack is older than previous acks @@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); pkts_acked = prior_packets - tp->packets_out; - newly_acked_sacked = (prior_packets - prior_sacked) - - (tp->packets_out - tp->sacked_out); if (tp->frto_counter) frto_cwnd = tcp_process_frto(sk, flag); @@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); } else { if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) @@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); /* If this ack opens up a zero window, clear backoff. 
It was * being used to time the probes, and is probably far higher than @@ -3718,8 +3718,7 @@ old_ack: */ if (TCP_SKB_CB(skb)->sacked) { flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); - newly_acked_sacked = tp->sacked_out - prior_sacked; - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 6dc7fd353ef5..282f3723ee19 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) struct esp_data *esp = x->data; /* skb is pure payload to encrypt */ - err = -ENOMEM; - aead = esp->aead; alen = crypto_aead_authsize(aead); @@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); - if (!tmp) + if (!tmp) { + err = -ENOMEM; goto error; + } seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 393355d37b47..513cab08a986 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) /* Remove from tunnel list */ spin_lock_bh(&pn->l2tp_tunnel_list_lock); list_del_rcu(&tunnel->list); + kfree_rcu(tunnel, rcu); spin_unlock_bh(&pn->l2tp_tunnel_list_lock); - synchronize_rcu(); atomic_dec(&l2tp_tunnel_count); - kfree(tunnel); } /* Create a socket for the tunnel, if one isn't set up by diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a38ec6cdeee1..56d583e083a7 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg { struct l2tp_tunnel { int magic; /* Should be L2TP_TUNNEL_MAGIC */ + struct rcu_head rcu; rwlock_t hlist_lock; /* protect session_hlist */ struct hlist_head session_hlist[L2TP_HASH_SIZE]; /* hashed list of sessions, diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index acf712ffb5e6..c5e8c9c31f76 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata, NULL, NULL); } else { - int is_mesh_mcast = 1; - const u8 *mesh_da; + /* DS -> MBSS (802.11-2012 13.11.3.3). + * For unicast with unknown forwarding information, + * destination might be in the MBSS or if that fails + * forwarded to another mesh gate. In either case + * resolution will be handled in ieee80211_xmit(), so + * leave the original DA. 
This also works for mcast */ + const u8 *mesh_da = skb->data; + + if (mppath) + mesh_da = mppath->mpp; + else if (mpath) + mesh_da = mpath->dst; + rcu_read_unlock(); - if (is_multicast_ether_addr(skb->data)) - /* DA TA mSA AE:SA */ - mesh_da = skb->data; - else { - static const u8 bcast[ETH_ALEN] = - { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - if (mppath) { - /* RA TA mDA mSA AE:DA SA */ - mesh_da = mppath->mpp; - is_mesh_mcast = 0; - } else if (mpath) { - mesh_da = mpath->dst; - is_mesh_mcast = 0; - } else { - /* DA TA mSA AE:SA */ - mesh_da = bcast; - } - } hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, mesh_da, sdata->vif.addr); - rcu_read_unlock(); - if (is_mesh_mcast) + if (is_multicast_ether_addr(mesh_da)) + /* DA TA mSA AE:SA */ meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata, skb->data + ETH_ALEN, NULL); else + /* RA TA mDA mSA AE:DA SA */ meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata, diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 72bf32a84874..f51013c07b9f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, goto out_err; } svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); - if (!svc->stats.cpustats) + if (!svc->stats.cpustats) { + ret = -ENOMEM; goto out_err; + } /* I'm the first user of the service */ atomic_set(&svc->usecnt, 0); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index cf4875565d67..2ceec64b19f9 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack) { struct nf_conn *ct = (void *)ul_conntrack; struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); + + BUG_ON(ecache == NULL); if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { /* bad luck, let's retry again */ - ct->timeout.expires = jiffies + + ecache->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); - add_timer(&ct->timeout); + add_timer(&ecache->timeout); return; } /* we've got the event delivered, now it's dying */ @@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack) void nf_ct_insert_dying_list(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); + + BUG_ON(ecache == NULL); /* add this conntrack to the dying list */ spin_lock_bh(&nf_conntrack_lock); @@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct) &net->ct.dying); spin_unlock_bh(&nf_conntrack_lock); /* set a new timer to retry event delivery */ - setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); - ct->timeout.expires = jiffies + + setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct); + ecache->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); - add_timer(&ct->timeout); + add_timer(&ecache->timeout); } EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index da4fc37a8578..9807f3278fcb 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void) goto err_unreg_subsys; } - if (register_pernet_subsys(&ctnetlink_net_ops)) { + ret = register_pernet_subsys(&ctnetlink_net_ops); + if (ret < 0) { pr_err("ctnetlink_init: cannot register pernet 
operations\n"); goto err_unreg_exp_subsys; } diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 169ab59ed9d4..14e2f3903142 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst, } if (indev && skb_mac_header_was_set(skb)) { - if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || + if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || nla_put_be16(inst->skb, NFULA_HWLEN, htons(skb->dev->hard_header_len)) || nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, @@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void) #ifdef CONFIG_PROC_FS if (!proc_create("nfnetlink_log", 0440, - proc_net_netfilter, &nful_file_ops)) + proc_net_netfilter, &nful_file_ops)) { + status = -ENOMEM; goto cleanup_logger; + } #endif return status; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 1445d73533ed..527023823b5c 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); err = -EPERM; - if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) + if ((dst_group || dst_pid) && + !netlink_capable(sock, NL_NONROOT_SEND)) goto out; } else { dst_pid = nlk->dst_pid; @@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void) rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); nl_table[NETLINK_USERSOCK].module = THIS_MODULE; nl_table[NETLINK_USERSOCK].registered = 1; + nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND; netlink_table_ungrab(); } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index aee7196aac36..c5c9e2a54218 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) spin_unlock(&f->lock); } -bool match_fanout_group(struct packet_type *ptype, struct sock * sk) +static bool match_fanout_group(struct packet_type *ptype, struct sock * sk) { if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) return true; diff --git a/net/socket.c b/net/socket.c index a5471f804d99..edc3c4af9085 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) - err = compat_put_timeval(up, &ktv); + err = compat_put_timeval(&ktv, up); return err; } @@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) - err = compat_put_timespec(up, &kts); + err = compat_put_timespec(&kts, up); return err; } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 87cd0e4d4282..210be48d8ae3 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay) goto error; x->outer_mode = xfrm_get_mode(x->props.mode, family); - if (x->outer_mode == NULL) + if (x->outer_mode == NULL) { + err = -EPROTONOSUPPORT; goto error; + } if (init_replay) { err = xfrm_init_replay(x); diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst index 6bf8e87f1dcf..c3f69ae275d1 100644 --- a/scripts/Makefile.fwinst +++ b/scripts/Makefile.fwinst @@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst 
$(srctree)/,,$@) $(installed-fw-dirs): $(call cmd,mkdir) -$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %) +$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%) $(call cmd,install) PHONY += __fw_install __fw_modinst FORCE diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 4629038c9e5a..4235a6361fec 100644 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -211,7 +211,7 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then if ! cmp -s System.map .tmp_System.map; then echo >&2 Inconsistent kallsyms data - echo >&2 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround + echo >&2 Try "make KALLSYMS_EXTRA_PASS=1" as a workaround cleanup exit 1 fi diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index f560051a949e..f25c24c743f9 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1209,6 +1209,9 @@ static void snd_hda_codec_free(struct hda_codec *codec) kfree(codec); } +static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, + hda_nid_t fg, unsigned int power_state); + static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, unsigned int power_state); @@ -1317,6 +1320,10 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, AC_VERB_GET_SUBSYSTEM_ID, 0); } + codec->epss = snd_hda_codec_get_supported_ps(codec, + codec->afg ? codec->afg : codec->mfg, + AC_PWRST_EPSS); + /* power-up all before initialization */ hda_set_power_state(codec, codec->afg ? codec->afg : codec->mfg, @@ -3543,8 +3550,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, /* this delay seems necessary to avoid click noise at power-down */ if (power_state == AC_PWRST_D3) { /* transition time less than 10ms for power down */ - bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS); - msleep(epss ? 10 : 100); + msleep(codec->epss ? 10 : 100); } /* repeat power states setting at most 10 times*/ diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 7fbc1bcaf1a9..e5a7e19a8071 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h @@ -862,6 +862,7 @@ struct hda_codec { unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */ unsigned int no_jack_detect:1; /* Machine has no jack-detection */ unsigned int pcm_format_first:1; /* PCM format must be set first */ + unsigned int epss:1; /* supporting EPSS? 
*/ #ifdef CONFIG_SND_HDA_POWER_SAVE unsigned int power_on :1; /* current (global) power-state */ int power_transition; /* power-state in transition */ diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index ea5775a1a7db..6f806d3e56bb 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -4543,6 +4543,9 @@ static void stac92xx_line_out_detect(struct hda_codec *codec, struct auto_pin_cfg *cfg = &spec->autocfg; int i; + if (cfg->speaker_outs == 0) + return; + for (i = 0; i < cfg->line_outs; i++) { if (presence) break; @@ -5531,6 +5534,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec) snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); } + codec->epss = 0; /* longer delay needed for D3 */ codec->no_trigger_sense = 1; codec->spec = spec; diff --git a/sound/usb/card.c b/sound/usb/card.c index d5b5c3388e28..4a469f0cb6d4 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -553,7 +553,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, struct snd_usb_audio *chip) { struct snd_card *card; - struct list_head *p; + struct list_head *p, *n; if (chip == (void *)-1L) return; @@ -570,7 +570,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, snd_usb_stream_disconnect(p); } /* release the endpoint resources */ - list_for_each(p, &chip->ep_list) { + list_for_each_safe(p, n, &chip->ep_list) { snd_usb_endpoint_free(p); } /* release the midi resources */ diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index c41181202688..d6e2bb49c59c 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -141,7 +141,7 @@ int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep) * * For implicit feedback, next_packet_size() is unused. */ -static int next_packet_size(struct snd_usb_endpoint *ep) +int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) { unsigned long flags; int ret; @@ -177,15 +177,6 @@ static void retire_inbound_urb(struct snd_usb_endpoint *ep, ep->retire_data_urb(ep->data_subs, urb); } -static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep, - struct snd_urb_ctx *ctx) -{ - int i; - - for (i = 0; i < ctx->packets; ++i) - ctx->packet_size[i] = next_packet_size(ep); -} - /* * Prepare a PLAYBACK urb for submission to the bus. */ @@ -370,7 +361,6 @@ static void snd_complete_urb(struct urb *urb) goto exit_clear; } - prepare_outbound_urb_sizes(ep, ctx); prepare_outbound_urb(ep, ctx); } else { retire_inbound_urb(ep, ctx); @@ -799,7 +789,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, /** * snd_usb_endpoint_start: start an snd_usb_endpoint * - * @ep: the endpoint to start + * @ep: the endpoint to start + * @can_sleep: flag indicating whether the operation is executed in + * non-atomic context * * A call to this function will increment the use count of the endpoint. * In case it is not already running, the URBs for this endpoint will be @@ -809,7 +801,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, * * Returns an error if the URB submission failed, 0 in all other cases. 
*/ -int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) +int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep) { int err; unsigned int i; @@ -821,6 +813,11 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) if (++ep->use_count != 1) return 0; + /* just to be sure */ + deactivate_urbs(ep, 0, can_sleep); + if (can_sleep) + wait_clear_urbs(ep); + ep->active_mask = 0; ep->unlink_mask = 0; ep->phase = 0; @@ -850,7 +847,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) goto __error; if (usb_pipeout(ep->pipe)) { - prepare_outbound_urb_sizes(ep, urb->context); prepare_outbound_urb(ep, urb->context); } else { prepare_inbound_urb(ep, urb->context); diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h index ee2723fb174f..cbbbdf226d66 100644 --- a/sound/usb/endpoint.h +++ b/sound/usb/endpoint.h @@ -13,7 +13,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, struct audioformat *fmt, struct snd_usb_endpoint *sync_ep); -int snd_usb_endpoint_start(struct snd_usb_endpoint *ep); +int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep); void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, int force, int can_sleep, int wait); int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); @@ -21,6 +21,7 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep); void snd_usb_endpoint_free(struct list_head *head); int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep); +int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, struct snd_usb_endpoint *sender, diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 62ec808ed792..fd5e982fc98c 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -212,7 +212,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, } } -static int start_endpoints(struct snd_usb_substream *subs) +static int start_endpoints(struct snd_usb_substream *subs, int can_sleep) { int err; @@ -225,7 +225,7 @@ static int start_endpoints(struct snd_usb_substream *subs) snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep); ep->data_subs = subs; - err = snd_usb_endpoint_start(ep); + err = snd_usb_endpoint_start(ep, can_sleep); if (err < 0) { clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags); return err; @@ -236,10 +236,25 @@ static int start_endpoints(struct snd_usb_substream *subs) !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) { struct snd_usb_endpoint *ep = subs->sync_endpoint; + if (subs->data_endpoint->iface != subs->sync_endpoint->iface || + subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) { + err = usb_set_interface(subs->dev, + subs->sync_endpoint->iface, + subs->sync_endpoint->alt_idx); + if (err < 0) { + snd_printk(KERN_ERR + "%d:%d:%d: cannot set interface (%d)\n", + subs->dev->devnum, + subs->sync_endpoint->iface, + subs->sync_endpoint->alt_idx, err); + return -EIO; + } + } + snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep); ep->sync_slave = subs->data_endpoint; - err = snd_usb_endpoint_start(ep); + err = snd_usb_endpoint_start(ep, can_sleep); if (err < 0) { clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags); return err; @@ -544,13 +559,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) subs->last_frame_number = 0; runtime->delay = 0; - /* clear the pending deactivation on the target EPs */ - deactivate_endpoints(subs); - /* for playback, submit the URBs now; otherwise, the first hwptr_done * updates for all URBs would happen at the same time when 
starting */ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) - return start_endpoints(subs); + return start_endpoints(subs, 1); return 0; } @@ -1032,6 +1044,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, struct urb *urb) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; + struct snd_usb_endpoint *ep = subs->data_endpoint; struct snd_urb_ctx *ctx = urb->context; unsigned int counts, frames, bytes; int i, stride, period_elapsed = 0; @@ -1043,7 +1056,11 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, urb->number_of_packets = 0; spin_lock_irqsave(&subs->lock, flags); for (i = 0; i < ctx->packets; i++) { - counts = ctx->packet_size[i]; + if (ctx->packet_size[i]) + counts = ctx->packet_size[i]; + else + counts = snd_usb_endpoint_next_packet_size(ep); + /* set up descriptor */ urb->iso_frame_desc[i].offset = frames * stride; urb->iso_frame_desc[i].length = counts * stride; @@ -1094,7 +1111,16 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, subs->hwptr_done += bytes; if (subs->hwptr_done >= runtime->buffer_size * stride) subs->hwptr_done -= runtime->buffer_size * stride; + + /* update delay with exact number of samples queued */ + runtime->delay = subs->last_delay; runtime->delay += frames; + subs->last_delay = runtime->delay; + + /* realign last_frame_number */ + subs->last_frame_number = usb_get_current_frame_number(subs->dev); + subs->last_frame_number &= 0xFF; /* keep 8 LSBs */ + spin_unlock_irqrestore(&subs->lock, flags); urb->transfer_buffer_length = bytes; if (period_elapsed) @@ -1112,12 +1138,26 @@ static void retire_playback_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; int stride = runtime->frame_bits >> 3; int processed = urb->transfer_buffer_length / stride; + int est_delay; spin_lock_irqsave(&subs->lock, flags); - if (processed > runtime->delay) - runtime->delay = 0; + est_delay = snd_usb_pcm_delay(subs, runtime->rate); + /* update delay with exact number of samples played */ + if (processed > subs->last_delay) + subs->last_delay = 0; else - runtime->delay -= processed; + subs->last_delay -= processed; + runtime->delay = subs->last_delay; + + /* + * Report when delay estimate is off by more than 2ms. + * The error should be lower than 2ms since the estimate relies + * on two reads of a counter updated every ms. + */ + if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2) + snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n", + est_delay, subs->last_delay); + spin_unlock_irqrestore(&subs->lock, flags); } @@ -1175,7 +1215,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream switch (cmd) { case SNDRV_PCM_TRIGGER_START: - err = start_endpoints(subs); + err = start_endpoints(subs, 0); if (err < 0) return err; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 246852397e30..d617f69131d7 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1976,9 +1976,10 @@ static long kvm_vcpu_compat_ioctl(struct file *filp, if (copy_from_user(&csigset, sigmask_arg->sigset, sizeof csigset)) goto out; - } - sigset_from_compat(&sigset, &csigset); - r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); + sigset_from_compat(&sigset, &csigset); + r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); + } else + r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); break; } default: |
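The final kvm_vcpu_compat_ioctl() hunk moves the compat-sigset conversion and the kvm_vcpu_ioctl_set_sigmask() call inside the "if (sigmask_arg)" branch, so the path with no user-supplied mask now passes NULL instead of consuming a local sigset that was never filled in. A minimal userspace sketch of that control-flow fix, assuming illustrative names rather than the KVM API:

```c
#include <stdio.h>
#include <stdint.h>

static int set_mask(const uint64_t *mask)
{
	if (mask)
		printf("set mask 0x%016llx\n", (unsigned long long)*mask);
	else
		printf("no mask supplied, clearing\n");
	return 0;
}

static int compat_set_mask(const uint32_t *user_arg)
{
	if (user_arg) {
		/* convert the narrow compat value, then apply it */
		uint64_t mask = *user_arg;
		return set_mask(&mask);
	}
	/*
	 * The pre-fix pattern applied a local mask here that was only
	 * written inside the branch above; the fix makes the
	 * "no argument" case an explicit NULL request instead.
	 */
	return set_mask(NULL);
}

int main(void)
{
	uint32_t arg = 0xffu;

	compat_set_mask(&arg);
	compat_set_mask(NULL);
	return 0;
}
```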