Diffstat (limited to 'drivers'): 222 files changed, 1937 insertions(+), 1698 deletions(-)
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 8244f013f210..f1c966e05078 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -193,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win, u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16; bool wp = addr->info.mem.write_protect; u64 len = attr->address_length; + u64 start, end, offset = 0; struct resource *res = &win->res; /* @@ -204,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win, pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n", addr->min_address_fixed, addr->max_address_fixed, len); - res->start = attr->minimum; - res->end = attr->maximum; - /* * For bridges that translate addresses across the bridge, * translation_offset is the offset that must be added to the @@ -214,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win, * primary side. Non-bridge devices must list 0 for all Address * Translation offset bits. */ - if (addr->producer_consumer == ACPI_PRODUCER) { - res->start += attr->translation_offset; - res->end += attr->translation_offset; - } else if (attr->translation_offset) { + if (addr->producer_consumer == ACPI_PRODUCER) + offset = attr->translation_offset; + else if (attr->translation_offset) pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n", attr->translation_offset); + start = attr->minimum + offset; + end = attr->maximum + offset; + + win->offset = offset; + res->start = start; + res->end = end; + if (sizeof(resource_size_t) < sizeof(u64) && + (offset != win->offset || start != res->start || end != res->end)) { + pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n", + attr->minimum, attr->maximum); + return false; } switch (addr->resource_type) { @@ -236,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win, return false; } - win->offset = attr->translation_offset; - if (addr->producer_consumer == ACPI_PRODUCER) res->flags |= IORESOURCE_WINDOW; diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index a9b0c820f2eb..5d9ee99c2148 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -4,7 +4,7 @@ * Arasan Compact Flash host controller source file * * Copyright (C) 2011 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any @@ -968,7 +968,7 @@ static struct platform_driver arasan_cf_driver = { module_platform_driver(arasan_cf_driver); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index d1d6141920d3..7920c2741b47 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -2108,8 +2108,17 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) goto out_free_disk; add_disk(ns->disk); - if (ns->ms) - revalidate_disk(ns->disk); + if (ns->ms) { + struct block_device *bd = bdget_disk(ns->disk, 0); + if (!bd) + return; + if (blkdev_get(bd, FMODE_READ, NULL)) { + bdput(bd); + return; + } + blkdev_reread_part(bd); + blkdev_put(bd, FMODE_READ); + } return; out_free_disk: kfree(disk); diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 1e1a4323a71f..9ceb8ac68fdc 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev) /* Read Verbose Config Version Info */ skb = btbcm_read_verbose_config(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], - get_unaligned_le16(skb->data + 5)); - kfree_skb(skb); + if (!IS_ERR(skb)) { + BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], + get_unaligned_le16(skb->data + 5)); + kfree_skb(skb); + } set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 283f00a7f036..1082d4bb016a 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, device_initialize(&chip->dev); - chip->cdev.owner = chip->pdev->driver->owner; cdev_init(&chip->cdev, &tpm_fops); + chip->cdev.owner = chip->pdev->driver->owner; + chip->cdev.kobj.parent = &chip->dev.kobj; return chip; } diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 44f9d20c19ac..1267322595da 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device) return -ENODEV; } + /* At least some versions of AMI BIOS have a bug that TPM2 table has + * zero address for the control area and therefore we must fail. + */ + if (!buf->control_area_pa) { + dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n"); + return -EINVAL; + } + if (buf->hdr.length < sizeof(struct acpi_tpm2)) { dev_err(dev, "TPM2 ACPI table has wrong size"); return -EINVAL; diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c index bdfb4421c643..f271c350ef94 100644 --- a/drivers/clk/spear/clk-aux-synth.c +++ b/drivers/clk/spear/clk-aux-synth.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c index dffd4ce6c8b5..58d678b5b40a 100644 --- a/drivers/clk/spear/clk-frac-synth.c +++ b/drivers/clk/spear/clk-frac-synth.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c index 1afc18c4effc..1a722e99e76e 100644 --- a/drivers/clk/spear/clk-gpt-synth.c +++ b/drivers/clk/spear/clk-gpt-synth.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c index 1b9b65bca51e..5ebddc528145 100644 --- a/drivers/clk/spear/clk-vco-pll.c +++ b/drivers/clk/spear/clk-vco-pll.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c index 628b6d5ed3d9..157fe099ea6a 100644 --- a/drivers/clk/spear/clk.c +++ b/drivers/clk/spear/clk.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h index 931737677dfa..9834944f08b1 100644 --- a/drivers/clk/spear/clk.h +++ b/drivers/clk/spear/clk.h @@ -2,7 +2,7 @@ * Clock framework definitions for SPEAr platform * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c index 4daa5977793a..222ce108b41a 100644 --- a/drivers/clk/spear/spear1310_clock.c +++ b/drivers/clk/spear/spear1310_clock.c @@ -4,7 +4,7 @@ * SPEAr1310 machine clock framework source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c index 5a5c6648308d..973c9d3fbcf8 100644 --- a/drivers/clk/spear/spear1340_clock.c +++ b/drivers/clk/spear/spear1340_clock.c @@ -4,7 +4,7 @@ * SPEAr1340 machine clock framework source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c index bb5f387774e2..404a55edd613 100644 --- a/drivers/clk/spear/spear3xx_clock.c +++ b/drivers/clk/spear/spear3xx_clock.c @@ -2,7 +2,7 @@ * SPEAr3xx machines clock framework source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c index 4f649c9cb094..231061fa73a4 100644 --- a/drivers/clk/spear/spear6xx_clock.c +++ b/drivers/clk/spear/spear6xx_clock.c @@ -2,7 +2,7 @@ * SPEAr6xx machines clock framework source file * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index b612411655f9..26063afb3eba 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -169,6 +169,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) } EXPORT_SYMBOL_GPL(get_governor_parent_kobj); +struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) +{ + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + + return policy && !policy_is_inactive(policy) ? + policy->freq_table : NULL; +} +EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); + static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { u64 idle_time; @@ -1132,6 +1141,7 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) down_write(&policy->rwsem); policy->cpu = cpu; + policy->governor = NULL; up_write(&policy->rwsem); } diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index df14766a8e06..dfbbf981ed56 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -297,15 +297,6 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); -struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); - -struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - return policy ? policy->freq_table : NULL; -} -EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table); - MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); MODULE_DESCRIPTION("CPUfreq frequency table helpers"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index e8e2775c3821..48b7228563ad 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -112,7 +112,12 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, static void enter_freeze_proper(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) { - tick_freeze(); + /* + * trace_suspend_resume() called by tick_freeze() for the last CPU + * executing it contains RCU usage regarded as invalid in the idle + * context, so tell RCU about that. 
+ */ + RCU_NONIDLE(tick_freeze()); /* * The state used here cannot be a "coupled" one, because the "coupled" * cpuidle mechanism enables interrupts and doing that with timekeeping @@ -122,7 +127,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, WARN_ON(!irqs_disabled()); /* * timekeeping_resume() that will be called by tick_unfreeze() for the - * last CPU executing it calls functions containing RCU read-side + * first CPU executing it calls functions containing RCU read-side * critical sections, so tell RCU about that. */ RCU_NONIDLE(tick_unfreeze()); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 1022c2e1a2b0..cf1c87fa1edd 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1746,4 +1746,4 @@ EXPORT_SYMBOL_GPL(dw_dma_enable); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 7a3cb1fa0a76..4630a8133ea6 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -87,6 +87,15 @@ static int brcmstb_gpio_remove(struct platform_device *pdev) struct brcmstb_gpio_bank *bank; int ret = 0; + if (!priv) { + dev_err(&pdev->dev, "called %s without drvdata!\n", __func__); + return -EFAULT; + } + + /* + * You can lose return values below, but we report all errors, and it's + * more important to actually perform all of the steps. + */ list_for_each(pos, &priv->bank_list) { bank = list_entry(pos, struct brcmstb_gpio_bank, node); ret = bgpio_remove(&bank->bgc); @@ -143,6 +152,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + platform_set_drvdata(pdev, priv); + INIT_LIST_HEAD(&priv->bank_list); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); reg_base = devm_ioremap_resource(dev, res); @@ -153,7 +164,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) priv->reg_base = reg_base; priv->pdev = pdev; - INIT_LIST_HEAD(&priv->bank_list); if (brcmstb_gpio_sanity_check_banks(dev, np, res)) return -EINVAL; @@ -221,8 +231,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n", priv->num_banks, priv->gpio_base, gpio_base - 1); - platform_set_drvdata(pdev, priv); - return 0; fail: diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index c5e05c82d67c..c246ac3dda7c 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -578,15 +578,13 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) writel_relaxed(~0, &g->clr_falling); writel_relaxed(~0, &g->clr_rising); - /* set up all irqs in this bank */ - irq_set_chained_handler(bank_irq, gpio_irq_handler); - /* * Each chip handles 32 gpios, and each irq bank consists of 16 * gpio irqs. Pass the irq bank's corresponding controller to * the chained irq handler. 
*/ - irq_set_handler_data(bank_irq, &chips[gpio / 32]); + irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler, + &chips[gpio / 32]); binten |= BIT(bank); } diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c index aed4ca9338bc..7d3c90e9da71 100644 --- a/drivers/gpio/gpio-max732x.c +++ b/drivers/gpio/gpio-max732x.c @@ -603,6 +603,7 @@ static int max732x_setup_gpio(struct max732x_chip *chip, gc->base = gpio_start; gc->ngpio = port; gc->label = chip->client->name; + gc->dev = &chip->client->dev; gc->owner = THIS_MODULE; return port; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index b0c57d505be7..61a731ff9a07 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -500,8 +500,10 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) spin_lock_irqsave(&bank->lock, flags); retval = omap_set_gpio_triggering(bank, offset, type); - if (retval) + if (retval) { + spin_unlock_irqrestore(&bank->lock, flags); goto error; + } omap_gpio_init_irq(bank, offset); if (!omap_gpio_is_input(bank, offset)) { spin_unlock_irqrestore(&bank->lock, flags); @@ -1185,6 +1187,7 @@ static int omap_gpio_probe(struct platform_device *pdev) bank->irq = res->start; bank->dev = dev; bank->chip.dev = dev; + bank->chip.owner = THIS_MODULE; bank->dbck_flag = pdata->dbck_flag; bank->stride = pdata->bank_stride; bank->width = pdata->bank_width; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index d233eb3b8132..50caeb1ee350 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -570,6 +570,10 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, "could not connect irqchip to gpiochip\n"); return ret; } + + gpiochip_set_chained_irqchip(&chip->gpio_chip, + &pca953x_irq_chip, + client->irq, NULL); } return 0; diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index 77fe5d3cb105..d5284dfe01fe 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -220,9 +220,9 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc) if (!chip->gpio_width[1]) return; - xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_TRI_OFFSET, + xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET, chip->gpio_state[1]); - xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_TRI_OFFSET, + xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET, chip->gpio_dir[1]); } diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 2e87c4b8da26..a78882389836 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -757,6 +757,7 @@ static int zynq_gpio_remove(struct platform_device *pdev) gpiochip_remove(&gpio->chip); clk_disable_unprepare(gpio->clk); device_set_wakeup_capable(&pdev->dev, 0); + pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d63135bf29c0..1f040d85ac47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, static int amdgpu_cs_dependencies(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) { + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_ib *ib; int i, j, r; @@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, for (j = 0; j < num_deps; ++j) { struct amdgpu_fence *fence; struct amdgpu_ring *ring; + struct amdgpu_ctx *ctx; r = amdgpu_cs_get_ring(adev, deps[j].ip_type, 
deps[j].ip_instance, @@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, if (r) return r; + ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id); + if (ctx == NULL) + return -EINVAL; + r = amdgpu_fence_recreate(ring, p->filp, deps[j].handle, &fence); - if (r) + if (r) { + amdgpu_ctx_put(ctx); return r; + } amdgpu_sync_fence(&ib->sync, fence); amdgpu_fence_unref(&fence); + amdgpu_ctx_put(ctx); } } @@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, wait->in.ring, &ring); - if (r) + if (r) { + amdgpu_ctx_put(ctx); return r; + } r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence); - if (r) + if (r) { + amdgpu_ctx_put(ctx); return r; + } r = fence_wait_timeout(&fence->base, true, timeout); amdgpu_fence_unref(&fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ba46be361c9b..d79009b65867 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1207,10 +1207,15 @@ static int amdgpu_early_init(struct amdgpu_device *adev) } else { if (adev->ip_blocks[i].funcs->early_init) { r = adev->ip_blocks[i].funcs->early_init((void *)adev); - if (r) + if (r == -ENOENT) + adev->ip_block_enabled[i] = false; + else if (r) return r; + else + adev->ip_block_enabled[i] = true; + } else { + adev->ip_block_enabled[i] = true; } - adev->ip_block_enabled[i] = true; } } diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index f75a31df30bd..1a2d419cbf16 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -1679,25 +1679,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev) if (ret) return ret; - DRM_INFO("DPM unforce state min=%d, max=%d.\n", - pi->sclk_dpm.soft_min_clk, - pi->sclk_dpm.soft_max_clk); + DRM_DEBUG("DPM unforce state min=%d, max=%d.\n", + pi->sclk_dpm.soft_min_clk, + pi->sclk_dpm.soft_max_clk); return 0; } static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, - enum amdgpu_dpm_forced_level level) + enum amdgpu_dpm_forced_level level) { int ret = 0; switch (level) { case AMDGPU_DPM_FORCED_LEVEL_HIGH: + ret = cz_dpm_unforce_dpm_levels(adev); + if (ret) + return ret; ret = cz_dpm_force_highest(adev); if (ret) return ret; break; case AMDGPU_DPM_FORCED_LEVEL_LOW: + ret = cz_dpm_unforce_dpm_levels(adev); + if (ret) + return ret; ret = cz_dpm_force_lowest(adev); if (ret) return ret; @@ -1711,6 +1717,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, break; } + adev->pm.dpm.forced_level = level; + return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 08387dfd98a7..cc050a329c49 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; switch (mode) { case DRM_MODE_DPMS_ON: @@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v8_0_vga_enable(crtc, false); + /* Make sure VBLANK interrupt is still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, 
type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v8_0_crtc_load_lut(crtc); break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 7b683fb2173c..1c7c992dea37 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1813,10 +1813,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev, u32 data, mask; data = RREG32(mmCC_RB_BACKEND_DISABLE); - if (data & 1) - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - else - data = 0; + data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index fa5a4448531d..68552da40287 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->smc_idx_lock, flags); } +/* smu_8_0_d.h */ +#define mmMP0PUB_IND_INDEX 0x180 +#define mmMP0PUB_IND_DATA 0x181 + +static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmMP0PUB_IND_INDEX, (reg)); + r = RREG32(mmMP0PUB_IND_DATA); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + return r; +} + +static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(mmMP0PUB_IND_INDEX, (reg)); + WREG32(mmMP0PUB_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); +} + static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags; @@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle) bool smc_enabled = false; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->smc_rreg = &vi_smc_rreg; - adev->smc_wreg = &vi_smc_wreg; + if (adev->flags & AMDGPU_IS_APU) { + adev->smc_rreg = &cz_smc_rreg; + adev->smc_wreg = &cz_smc_wreg; + } else { + adev->smc_rreg = &vi_smc_rreg; + adev->smc_wreg = &vi_smc_wreg; + } adev->pcie_rreg = &vi_pcie_rreg; adev->pcie_wreg = &vi_pcie_wreg; adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 42d2ffa08716..01ffe9bffe38 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); - crtc->mode = *adj; - val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA; if (val != dcrtc->dumb_ctrl) { dcrtc->dumb_ctrl = val; diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 580e10acaa3a..60a688ef81c7 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj) if (dobj->obj.import_attach) { /* We only ever display imported data */ - dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt, - DMA_TO_DEVICE); + if (dobj->sgt) + dma_buf_unmap_attachment(dobj->obj.import_attach, + dobj->sgt, DMA_TO_DEVICE); drm_prime_gem_destroy(&dobj->obj, NULL); } diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index c5b06fdb459c..e939faba7fcc 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -7,6 +7,7 @@ * published by the Free Software Foundation. 
*/ #include <drm/drmP.h> +#include <drm/drm_plane_helper.h> #include "armada_crtc.h" #include "armada_drm.h" #include "armada_fb.h" @@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data) if (fb) armada_drm_queue_unref_work(dcrtc->crtc.dev, fb); -} -static unsigned armada_limit(int start, unsigned size, unsigned max) -{ - int end = start + size; - if (end < 0) - return 0; - if (start < 0) - start = 0; - return (unsigned)end > max ? max - start : end - start; + wake_up(&dplane->vbl.wait); } static int @@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, { struct armada_plane *dplane = drm_to_armada_plane(plane); struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + struct drm_rect src = { + .x1 = src_x, + .y1 = src_y, + .x2 = src_x + src_w, + .y2 = src_y + src_h, + }; + struct drm_rect dest = { + .x1 = crtc_x, + .y1 = crtc_y, + .x2 = crtc_x + crtc_w, + .y2 = crtc_y + crtc_h, + }; + const struct drm_rect clip = { + .x2 = crtc->mode.hdisplay, + .y2 = crtc->mode.vdisplay, + }; uint32_t val, ctrl0; unsigned idx = 0; + bool visible; int ret; - crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay); - crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay); + ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, + 0, INT_MAX, true, false, &visible); + if (ret) + return ret; + ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) | CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) | CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA; /* Does the position/size result in nothing to display? */ - if (crtc_w == 0 || crtc_h == 0) { + if (!visible) ctrl0 &= ~CFG_DMA_ENA; - } - - /* - * FIXME: if the starting point is off screen, we need to - * adjust src_x, src_y, src_w, src_h appropriately, and - * according to the scale. - */ if (!dcrtc->plane) { dcrtc->plane = plane; @@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, /* FIXME: overlay on an interlaced display */ /* Just updating the position/size? 
*/ if (plane->fb == fb && dplane->ctrl0 == ctrl0) { - val = (src_h & 0xffff0000) | src_w >> 16; + val = (drm_rect_height(&src) & 0xffff0000) | + drm_rect_width(&src) >> 16; dplane->src_hw = val; writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN); - val = crtc_h << 16 | crtc_w; + + val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest); dplane->dst_hw = val; writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN); - val = crtc_y << 16 | crtc_x; + + val = dest.y1 << 16 | dest.x1; dplane->dst_yx = val; writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN); + return 0; } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) { /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */ @@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, dcrtc->base + LCD_SPU_SRAM_PARA1); } - ret = wait_event_timeout(dplane->vbl.wait, - list_empty(&dplane->vbl.update.node), - HZ/25); - if (ret < 0) - return ret; + wait_event_timeout(dplane->vbl.wait, + list_empty(&dplane->vbl.update.node), + HZ/25); if (plane->fb != fb) { struct armada_gem_object *obj = drm_fb_obj(fb); - uint32_t sy, su, sv; + uint32_t addr[3], pixel_format; + int i, num_planes, hsub; /* * Take a reference on the new framebuffer - we want to @@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, older_fb); } - src_y >>= 16; - src_x >>= 16; - sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] + - src_x * fb->bits_per_pixel / 8; - su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] + - src_x; - sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] + - src_x; + src_y = src.y1 >> 16; + src_x = src.x1 >> 16; - armada_reg_queue_set(dplane->vbl.regs, idx, sy, + pixel_format = fb->pixel_format; + hsub = drm_format_horz_chroma_subsampling(pixel_format); + num_planes = drm_format_num_planes(pixel_format); + + /* + * Annoyingly, shifting a YUYV-format image by one pixel + * causes the U/V planes to toggle. Toggle the UV swap. + * (Unfortunately, this causes momentary colour flickering.) 
+ */ + if (src_x & (hsub - 1) && num_planes == 1) + ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV); + + for (i = 0; i < num_planes; i++) + addr[i] = obj->dev_addr + fb->offsets[i] + + src_y * fb->pitches[i] + + src_x * drm_format_plane_cpp(pixel_format, i); + for (; i < ARRAY_SIZE(addr); i++) + addr[i] = 0; + + armada_reg_queue_set(dplane->vbl.regs, idx, addr[0], LCD_SPU_DMA_START_ADDR_Y0); - armada_reg_queue_set(dplane->vbl.regs, idx, su, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[1], LCD_SPU_DMA_START_ADDR_U0); - armada_reg_queue_set(dplane->vbl.regs, idx, sv, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[2], LCD_SPU_DMA_START_ADDR_V0); - armada_reg_queue_set(dplane->vbl.regs, idx, sy, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[0], LCD_SPU_DMA_START_ADDR_Y1); - armada_reg_queue_set(dplane->vbl.regs, idx, su, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[1], LCD_SPU_DMA_START_ADDR_U1); - armada_reg_queue_set(dplane->vbl.regs, idx, sv, + armada_reg_queue_set(dplane->vbl.regs, idx, addr[2], LCD_SPU_DMA_START_ADDR_V1); val = fb->pitches[0] << 16 | fb->pitches[0]; @@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, LCD_SPU_DMA_PITCH_UV); } - val = (src_h & 0xffff0000) | src_w >> 16; + val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16; if (dplane->src_hw != val) { dplane->src_hw = val; armada_reg_queue_set(dplane->vbl.regs, idx, val, LCD_SPU_DMA_HPXL_VLN); } - val = crtc_h << 16 | crtc_w; + + val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest); if (dplane->dst_hw != val) { dplane->dst_hw = val; armada_reg_queue_set(dplane->vbl.regs, idx, val, LCD_SPU_DZM_HPXL_VLN); } - val = crtc_y << 16 | crtc_x; + + val = dest.y1 << 16 | dest.x1; if (dplane->dst_yx != val) { dplane->dst_yx = val; armada_reg_queue_set(dplane->vbl.regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN); } + if (dplane->ctrl0 != ctrl0) { dplane->ctrl0 = ctrl0; armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0, @@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane) static void armada_plane_destroy(struct drm_plane *plane) { - kfree(plane); + struct armada_plane *dplane = drm_to_armada_plane(plane); + + drm_plane_cleanup(plane); + + kfree(dplane); } static int armada_plane_set_property(struct drm_plane *plane, diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index b9ba06176eb1..357bd04a173b 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2706,8 +2706,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - /* For some reason crtc x/y offsets are signed internally. */ - if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) + /* + * Universal plane src offsets are only 16.16, prevent havoc for + * drivers using universal plane code internally. 
+ */ + if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000) return -ERANGE; drm_modeset_lock_all(dev); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index aa8bbb460c57..9cfcd0aef0df 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -70,6 +70,8 @@ #define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t) +#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t) + typedef struct drm_version_32 { int version_major; /**< Major version */ int version_minor; /**< Minor version */ @@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, return 0; } +typedef struct drm_mode_fb_cmd232 { + u32 fb_id; + u32 width; + u32 height; + u32 pixel_format; + u32 flags; + u32 handles[4]; + u32 pitches[4]; + u32 offsets[4]; + u64 modifier[4]; +} __attribute__((packed)) drm_mode_fb_cmd232_t; + +static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg; + struct drm_mode_fb_cmd232 req32; + struct drm_mode_fb_cmd2 __user *req64; + int i; + int err; + + if (copy_from_user(&req32, argp, sizeof(req32))) + return -EFAULT; + + req64 = compat_alloc_user_space(sizeof(*req64)); + + if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64)) + || __put_user(req32.width, &req64->width) + || __put_user(req32.height, &req64->height) + || __put_user(req32.pixel_format, &req64->pixel_format) + || __put_user(req32.flags, &req64->flags)) + return -EFAULT; + + for (i = 0; i < 4; i++) { + if (__put_user(req32.handles[i], &req64->handles[i])) + return -EFAULT; + if (__put_user(req32.pitches[i], &req64->pitches[i])) + return -EFAULT; + if (__put_user(req32.offsets[i], &req64->offsets[i])) + return -EFAULT; + if (__put_user(req32.modifier[i], &req64->modifier[i])) + return -EFAULT; + } + + err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64); + if (err) + return err; + + if (__get_user(req32.fb_id, &req64->fb_id)) + return -EFAULT; + + if (copy_to_user(argp, &req32, sizeof(req32))) + return -EFAULT; + + return 0; +} + static drm_ioctl_compat_t *drm_compat_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, @@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw, #endif [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, + [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2, }; /** diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 542fac628b28..5f27290201e0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -826,6 +826,7 @@ struct intel_context { struct kref ref; int user_handle; uint8_t remap_slice; + struct drm_i915_private *i915; struct drm_i915_file_private *file_priv; struct i915_ctx_hang_stats hang_stats; struct i915_hw_ppgtt *ppgtt; @@ -2036,8 +2037,6 @@ struct drm_i915_gem_object { unsigned int cache_level:3; unsigned int cache_dirty:1; - unsigned int has_dma_mapping:1; - unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; unsigned int pin_display; @@ -3116,7 +3115,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor); int i915_debugfs_connector_add(struct drm_connector *connector); void intel_display_crc_init(struct drm_device *dev); #else -static inline int i915_debugfs_connector_add(struct drm_connector *connector) {} +static inline int 
i915_debugfs_connector_add(struct drm_connector *connector) +{ return 0; } static inline void intel_display_crc_init(struct drm_device *dev) {} #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 248fd1ac7b3a..52b446b27b4d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -213,7 +213,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) sg_dma_len(sg) = obj->base.size; obj->pages = st; - obj->has_dma_mapping = true; return 0; } @@ -265,8 +264,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) sg_free_table(obj->pages); kfree(obj->pages); - - obj->has_dma_mapping = false; } static void @@ -2139,6 +2136,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } + i915_gem_gtt_finish_object(obj); + if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj); @@ -2199,6 +2198,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) struct sg_page_iter sg_iter; struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ + int ret; gfp_t gfp; /* Assert that the object is not currently in any GPU domain. As it @@ -2246,8 +2246,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) */ i915_gem_shrink_all(dev_priv); page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) + if (IS_ERR(page)) { + ret = PTR_ERR(page); goto err_pages; + } } #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl()) { @@ -2276,6 +2278,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) sg_mark_end(sg); obj->pages = st; + ret = i915_gem_gtt_prepare_object(obj); + if (ret) + goto err_pages; + if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj); @@ -2300,10 +2306,10 @@ err_pages: * space and so want to translate the error from shmemfs back to our * usual understanding of ENOMEM. */ - if (PTR_ERR(page) == -ENOSPC) - return -ENOMEM; - else - return PTR_ERR(page); + if (ret == -ENOSPC) + ret = -ENOMEM; + + return ret; } /* Ensure that the associated pages are gathered from the backing storage @@ -2542,6 +2548,7 @@ int __i915_add_request(struct intel_engine_cs *ring, } request->emitted_jiffies = jiffies; + ring->last_submitted_seqno = request->seqno; list_add_tail(&request->list, &ring->request_list); request->file_priv = NULL; @@ -3247,10 +3254,8 @@ int i915_vma_unbind(struct i915_vma *vma) /* Since the unbound list is global, only move to that list if * no more VMAs exist. 
*/ - if (list_empty(&obj->vma_list)) { - i915_gem_gtt_finish_object(obj); + if (list_empty(&obj->vma_list)) list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); - } /* And finally now the object is completely decoupled from this vma, * we can drop its hold on the backing storage and allow it to be @@ -3768,22 +3773,16 @@ search_free: goto err_remove_node; } - ret = i915_gem_gtt_prepare_object(obj); - if (ret) - goto err_remove_node; - trace_i915_vma_bind(vma, flags); ret = i915_vma_bind(vma, obj->cache_level, flags); if (ret) - goto err_finish_gtt; + goto err_remove_node; list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&vma->mm_list, &vm->inactive_list); return vma; -err_finish_gtt: - i915_gem_gtt_finish_object(obj); err_remove_node: drm_mm_remove_node(&vma->node); err_free_vma: diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index d65cbe6afb92..48afa777e94a 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev) void i915_gem_context_free(struct kref *ctx_ref) { - struct intel_context *ctx = container_of(ctx_ref, - typeof(*ctx), ref); + struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); trace_i915_context_free(ctx); @@ -195,6 +194,7 @@ __create_hw_context(struct drm_device *dev, kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->context_list); + ctx->i915 = dev_priv; if (dev_priv->hw_context_size) { struct drm_i915_gem_object *obj = diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 7998da27c500..e9c2bfd85b52 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) return PTR_ERR(sg); obj->pages = sg; - obj->has_dma_mapping = true; return 0; } @@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj) { dma_buf_unmap_attachment(obj->base.import_attach, obj->pages, DMA_BIDIRECTIONAL); - obj->has_dma_mapping = false; } static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index dcc6a88c560e..56b52a4767d4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1723,9 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) { - if (obj->has_dma_mapping) - return 0; - if (!dma_map_sg(&obj->base.dev->pdev->dev, obj->pages->sgl, obj->pages->nents, PCI_DMA_BIDIRECTIONAL)) @@ -1972,10 +1969,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) interruptible = do_idling(dev_priv); - if (!obj->has_dma_mapping) - dma_unmap_sg(&dev->pdev->dev, - obj->pages->sgl, obj->pages->nents, - PCI_DMA_BIDIRECTIONAL); + dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents, + PCI_DMA_BIDIRECTIONAL); undo_idling(dev_priv, interruptible); } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 348ed5abcdbf..8b5b784c62fe 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -416,7 +416,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev, if (obj->pages == NULL) goto cleanup; - obj->has_dma_mapping = true; 
i915_gem_object_pin_pages(obj); obj->stolen = stolen; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index d61e74a08f82..633bd1fcab69 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -183,18 +183,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) if (IS_GEN4(dev)) { uint32_t ddc2 = I915_READ(DCC2); - if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) { - /* Since the swizzling may vary within an - * object, we have no idea what the swizzling - * is for any page in particular. Thus we - * cannot migrate tiled pages using the GPU, - * nor can we tell userspace what the exact - * swizzling is for any object. - */ + if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; - swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; - swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; - } } if (dcc == 0xffffffff) { diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 1f4e5a32a16e..8fd431bcdfd3 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -545,6 +545,26 @@ err: return ret; } +static int +__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj, + struct page **pvec, int num_pages) +{ + int ret; + + ret = st_set_pages(&obj->pages, pvec, num_pages); + if (ret) + return ret; + + ret = i915_gem_gtt_prepare_object(obj); + if (ret) { + sg_free_table(obj->pages); + kfree(obj->pages); + obj->pages = NULL; + } + + return ret; +} + static void __i915_gem_userptr_get_pages_worker(struct work_struct *_work) { @@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) if (obj->userptr.work != &work->work) { ret = 0; } else if (pinned == num_pages) { - ret = st_set_pages(&obj->pages, pvec, num_pages); + ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); if (ret == 0) { list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list); + obj->get_page.sg = obj->pages->sgl; + obj->get_page.last = 0; + pinned = 0; } } @@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) } } } else { - ret = st_set_pages(&obj->pages, pvec, num_pages); + ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); if (ret == 0) { obj->userptr.work = NULL; pinned = 0; @@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) if (obj->madv != I915_MADV_WILLNEED) obj->dirty = 0; + i915_gem_gtt_finish_object(obj); + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { struct page *page = sg_page_iter_page(&sg_iter); diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 176de6322e4d..23aa04cded6b 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) drm_ioctl_compat_t *fn = NULL; int ret; - if (nr < DRM_COMMAND_BASE) + if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END) return drm_compat_ioctl(filp, cmd, arg); if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e6bb72dca3ff..984e2fe6688c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2706,18 +2706,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static struct drm_i915_gem_request * -ring_last_request(struct 
intel_engine_cs *ring) -{ - return list_entry(ring->request_list.prev, - struct drm_i915_gem_request, list); -} - static bool -ring_idle(struct intel_engine_cs *ring) +ring_idle(struct intel_engine_cs *ring, u32 seqno) { return (list_empty(&ring->request_list) || - i915_gem_request_completed(ring_last_request(ring), false)); + i915_seqno_passed(seqno, ring->last_submitted_seqno)); } static bool @@ -2939,7 +2932,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) acthd = intel_ring_get_active_head(ring); if (ring->hangcheck.seqno == seqno) { - if (ring_idle(ring)) { + if (ring_idle(ring, seqno)) { ring->hangcheck.action = HANGCHECK_IDLE; if (waitqueue_active(&ring->irq_queue)) { diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 497cba5deb1e..849a2590e010 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context, TP_fast_assign( __entry->ctx = ctx; __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL; - __entry->dev = ctx->file_priv->dev_priv->dev->primary->index; + __entry->dev = ctx->i915->dev->primary->index; ), TP_printk("dev=%u, ctx=%p, ctx_vm=%p", diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 647b1404c441..30e0f54ba19d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6315,9 +6315,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc) struct drm_connector *connector; struct drm_i915_private *dev_priv = dev->dev_private; - /* crtc should still be enabled when we disable it. */ - WARN_ON(!crtc->state->enable); - intel_crtc_disable_planes(crtc); dev_priv->display.crtc_disable(crtc); dev_priv->display.off(crtc); @@ -12591,7 +12588,8 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc, continue; if (!crtc_state->enable) { - intel_crtc_disable(crtc); + if (crtc->state->enable) + intel_crtc_disable(crtc); } else if (crtc->state->enable) { intel_crtc_disable_planes(crtc); dev_priv->display.crtc_disable(crtc); @@ -13276,7 +13274,7 @@ intel_check_primary_plane(struct drm_plane *plane, if (ret) return ret; - if (intel_crtc->active) { + if (crtc_state ? crtc_state->base.active : intel_crtc->active) { struct intel_plane_state *old_state = to_intel_plane_state(plane->state); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index e539314ae87e..4be66f60504d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -275,6 +275,13 @@ struct intel_engine_cs { * Do we have some not yet emitted requests outstanding? */ struct drm_i915_gem_request *outstanding_lazy_request; + /** + * Seqno of request most recently submitted to request_list. + * Used exclusively by hang checker to avoid grabbing lock while + * inspecting request list. 
+ */ + u32 last_submitted_seqno; + bool gpu_caches_dirty; wait_queue_head_t irq_queue; diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 214eceefc981..e671ad369416 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -301,7 +301,7 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder) switch (tve->mode) { case TVE_MODE_VGA: - imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24, + imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24, tve->hsync_pin, tve->vsync_pin); break; case TVE_MODE_TVOUT: diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 74a9ce40ddc4..b4deb9cf9d71 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -21,6 +21,7 @@ #include <drm/drm_panel.h> #include <linux/videodev2.h> #include <video/of_display_timing.h> +#include <linux/of_graph.h> #include "imx-drm.h" @@ -208,7 +209,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct device_node *np = dev->of_node; - struct device_node *panel_node; + struct device_node *port; const u8 *edidp; struct imx_parallel_display *imxpd; int ret; @@ -234,11 +235,19 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI; } - panel_node = of_parse_phandle(np, "fsl,panel", 0); - if (panel_node) { - imxpd->panel = of_drm_find_panel(panel_node); - if (!imxpd->panel) - return -EPROBE_DEFER; + /* port@1 is the output port */ + port = of_graph_get_port_by_id(np, 1); + if (port) { + struct device_node *endpoint, *remote; + + endpoint = of_get_child_by_name(port, "endpoint"); + if (endpoint) { + remote = of_graph_get_remote_port_parent(endpoint); + if (remote) + imxpd->panel = of_drm_find_panel(remote); + if (!imxpd->panel) + return -EPROBE_DEFER; + } } imxpd->dev = dev; diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 8730562323a8..4a09947be244 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev) tmp |= DPM_ENABLED; break; default: - DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift); + DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift); break; } WREG32_SMC(CNB_PWRMGT_CNTL, tmp); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 5450fa95a47e..c4777c8d0312 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, } } } - mb(); - radeon_gart_tlb_flush(rdev); + if (rdev->gart.ptr) { + mb(); + radeon_gart_tlb_flush(rdev); + } } /** @@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, page_base += RADEON_GPU_PAGE_SIZE; } } - mb(); - radeon_gart_tlb_flush(rdev); + if (rdev->gart.ptr) { + mb(); + radeon_gart_tlb_flush(rdev); + } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 013ec7106e55..3dcc5733ff69 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj) if (robj) { if (robj->gem_base.import_attach) drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); + radeon_mn_unregister(robj); radeon_bo_unref(&robj); } } diff --git 
a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 318165d4855c..676362769b8d 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) bo = container_of(tbo, struct radeon_bo, tbo); radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); - radeon_mn_unregister(bo); mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 1dbdf3230dae..787cd8fd897f 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 01b558fe3695..9a0c2911272a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -555,7 +555,6 @@ static struct platform_driver rockchip_drm_platform_driver = { .probe = rockchip_drm_platform_probe, .remove = rockchip_drm_platform_remove, .driver = { - .owner = THIS_MODULE, .name = "rockchip-drm", .of_match_table = rockchip_drm_dt_ids, .pm = &rockchip_drm_pm_ops, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 77d52893d40f..002645bb5bbf 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -162,7 +162,8 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev) struct rockchip_drm_private *private = dev->dev_private; struct drm_fb_helper *fb_helper = &private->fbdev_helper; - drm_fb_helper_hotplug_event(fb_helper); + if (fb_helper) + drm_fb_helper_hotplug_event(fb_helper); } static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index eb2282cc4a56..eba5f8a52fbd 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) &rk_obj->dma_attrs); } -int rockchip_gem_mmap_buf(struct drm_gem_object *obj, - struct vm_area_struct *vma) +static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) + { + int ret; struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); struct drm_device *drm = obj->dev; - unsigned long vm_size; - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - vm_size = vma->vm_end - vma->vm_start; - - if (vm_size > obj->size) - return -EINVAL; + /* + * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear + * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 
+ */ + vma->vm_flags &= ~VM_PFNMAP; - return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, + ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, &rk_obj->dma_attrs); + if (ret) + drm_gem_vm_close(vma); + + return ret; } -/* drm driver mmap file operations */ -int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) +int rockchip_gem_mmap_buf(struct drm_gem_object *obj, + struct vm_area_struct *vma) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_gem_object *obj; - struct drm_vma_offset_node *node; + struct drm_device *drm = obj->dev; int ret; - if (drm_device_is_unplugged(dev)) - return -ENODEV; + mutex_lock(&drm->struct_mutex); + ret = drm_gem_mmap_obj(obj, obj->size, vma); + mutex_unlock(&drm->struct_mutex); + if (ret) + return ret; - mutex_lock(&dev->struct_mutex); + return rockchip_drm_gem_object_mmap(obj, vma); +} - node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, - vma->vm_pgoff, - vma_pages(vma)); - if (!node) { - mutex_unlock(&dev->struct_mutex); - DRM_ERROR("failed to find vma node.\n"); - return -EINVAL; - } else if (!drm_vma_node_is_allowed(node, filp)) { - mutex_unlock(&dev->struct_mutex); - return -EACCES; - } +/* drm driver mmap file operations */ +int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_gem_object *obj; + int ret; - obj = container_of(node, struct drm_gem_object, vma_node); - ret = rockchip_gem_mmap_buf(obj, vma); + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; - mutex_unlock(&dev->struct_mutex); + obj = vma->vm_private_data; - return ret; + return rockchip_drm_gem_object_mmap(obj, vma); } struct rockchip_gem_object * diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index dc65161d7cad..34b78e736532 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -170,6 +170,7 @@ struct vop_win_phy { struct vop_reg enable; struct vop_reg format; + struct vop_reg rb_swap; struct vop_reg act_info; struct vop_reg dsp_info; struct vop_reg dsp_st; @@ -199,8 +200,12 @@ struct vop_data { static const uint32_t formats_01[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV24, @@ -209,8 +214,12 @@ static const uint32_t formats_01[] = { static const uint32_t formats_234[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, }; static const struct vop_win_phy win01_data = { @@ -218,6 +227,7 @@ static const struct vop_win_phy win01_data = { .nformats = ARRAY_SIZE(formats_01), .enable = VOP_REG(WIN0_CTRL0, 0x1, 0), .format = VOP_REG(WIN0_CTRL0, 0x7, 1), + .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12), .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0), .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0), .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0), @@ -234,6 +244,7 @@ static const struct vop_win_phy win23_data = { .nformats = ARRAY_SIZE(formats_234), .enable = VOP_REG(WIN2_CTRL0, 0x1, 0), .format = VOP_REG(WIN2_CTRL0, 0x7, 1), + .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12), .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0), .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0), .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0), @@ -242,15 +253,6 @@ 
static const struct vop_win_phy win23_data = { .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0), }; -static const struct vop_win_phy cursor_data = { - .data_formats = formats_234, - .nformats = ARRAY_SIZE(formats_234), - .enable = VOP_REG(HWC_CTRL0, 0x1, 0), - .format = VOP_REG(HWC_CTRL0, 0x7, 1), - .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0), - .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0), -}; - static const struct vop_ctrl ctrl_data = { .standby = VOP_REG(SYS_CTRL, 0x1, 22), .gate_en = VOP_REG(SYS_CTRL, 0x1, 23), @@ -282,14 +284,14 @@ static const struct vop_reg_data vop_init_reg_table[] = { /* * Note: rk3288 has a dedicated 'cursor' window, however, that window requires * special support to get alpha blending working. For now, just use overlay - * window 1 for the drm cursor. + * window 3 for the drm cursor. + * */ static const struct vop_win_data rk3288_vop_win_data[] = { { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY }, - { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR }, + { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY }, { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, - { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY }, + { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR }, }; static const struct vop_data rk3288_vop = { @@ -352,15 +354,32 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset, } } +static bool has_rb_swapped(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_BGR888: + case DRM_FORMAT_BGR565: + return true; + default: + return false; + } +} + static enum vop_data_format vop_convert_format(uint32_t format) { switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: return VOP_FMT_ARGB8888; case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: return VOP_FMT_RGB888; case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: return VOP_FMT_RGB565; case DRM_FORMAT_NV12: return VOP_FMT_YUV420SP; @@ -378,6 +397,7 @@ static bool is_alpha_support(uint32_t format) { switch (format) { case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: return true; default: return false; @@ -588,6 +608,7 @@ static int vop_update_plane_event(struct drm_plane *plane, enum vop_data_format format; uint32_t val; bool is_alpha; + bool rb_swap; bool visible; int ret; struct drm_rect dest = { @@ -621,6 +642,7 @@ static int vop_update_plane_event(struct drm_plane *plane, return 0; is_alpha = is_alpha_support(fb->pixel_format); + rb_swap = has_rb_swapped(fb->pixel_format); format = vop_convert_format(fb->pixel_format); if (format < 0) return format; @@ -689,6 +711,7 @@ static int vop_update_plane_event(struct drm_plane *plane, val = (dsp_sty - 1) << 16; val |= (dsp_stx - 1) & 0xffff; VOP_WIN_SET(vop, win, dsp_st, val); + VOP_WIN_SET(vop, win, rb_swap, rb_swap); if (is_alpha) { VOP_WIN_SET(vop, win, dst_alpha_ctl, diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 3077f1554099..624d941aaad1 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -963,14 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) } else { pool->npages_free += count; list_splice(&ttm_dma->pages_list, &pool->free_list); - npages = count; - if (pool->npages_free > 
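/*
 * An illustrative sketch (plain C, invented enum names) of the scheme the
 * VOP hunks above use for the new BGR formats: each BGR variant is
 * programmed as the same hardware format as its RGB counterpart, and only
 * the new per-window rb_swap bit differs, which is what has_rb_swapped()
 * and the extra cases in vop_convert_format() encode. The real driver
 * works on DRM fourcc codes and writes the bit with VOP_WIN_SET().
 */
#include <stdbool.h>

enum pix_fmt {          /* stand-ins for the DRM_FORMAT_* codes listed above */
        FMT_XRGB8888, FMT_ARGB8888, FMT_XBGR8888, FMT_ABGR8888,
        FMT_RGB888, FMT_BGR888, FMT_RGB565, FMT_BGR565,
};

enum hw_fmt { HW_ARGB8888, HW_RGB888, HW_RGB565 };      /* VOP_FMT_* stand-ins */

struct win_cfg {
        enum hw_fmt format;     /* goes into the window's format field */
        bool rb_swap;           /* goes into the new rb_swap bit       */
};

static struct win_cfg vop_pick_format(enum pix_fmt f)
{
        struct win_cfg c = { HW_ARGB8888, false };

        switch (f) {
        case FMT_XBGR8888: case FMT_ABGR8888:
                c.rb_swap = true;       /* fall through: same layout as RGB twin */
        case FMT_XRGB8888: case FMT_ARGB8888:
                c.format = HW_ARGB8888;
                break;
        case FMT_BGR888:
                c.rb_swap = true;       /* fall through */
        case FMT_RGB888:
                c.format = HW_RGB888;
                break;
        case FMT_BGR565:
                c.rb_swap = true;       /* fall through */
        case FMT_RGB565:
                c.format = HW_RGB565;
                break;
        }
        return c;
}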
_manager->options.max_size) { + /* + * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages + * to free in order to minimize calls to set_memory_wb(). + */ + if (pool->npages_free >= (_manager->options.max_size + + NUM_PAGES_TO_ALLOC)) npages = pool->npages_free - _manager->options.max_size; - /* free at least NUM_PAGES_TO_ALLOC number of pages - * to reduce calls to set_memory_wb */ - if (npages < NUM_PAGES_TO_ALLOC) - npages = NUM_PAGES_TO_ALLOC; - } } spin_unlock_irqrestore(&pool->lock, irq_flags); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 6d2f39d36e44..00f2058944e5 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1107,6 +1107,9 @@ static int ipu_irq_init(struct ipu_soc *ipu) return ret; } + for (i = 0; i < IPU_NUM_IRQS; i += 32) + ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32)); + for (i = 0; i < IPU_NUM_IRQS; i += 32) { gc = irq_get_domain_generic_chip(ipu->domain, i); gc->reg_base = ipu->cm_reg; diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c index 4e70f51c2370..cc5a35750b50 100644 --- a/drivers/iio/accel/bmc150-accel.c +++ b/drivers/iio/accel/bmc150-accel.c @@ -1464,7 +1464,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data, { int i; - for (i = from; i >= 0; i++) { + for (i = from; i >= 0; i--) { if (data->triggers[i].indio_trig) { iio_trigger_unregister(data->triggers[i].indio_trig); data->triggers[i].indio_trig = NULL; diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 7c5565891cb8..eb0cd897714a 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -153,8 +153,7 @@ config DA9150_GPADC config CC10001_ADC tristate "Cosmic Circuits 10001 ADC driver" - depends on HAVE_CLK || REGULATOR - depends on HAS_IOMEM + depends on HAS_IOMEM && HAVE_CLK && REGULATOR select IIO_BUFFER select IIO_TRIGGERED_BUFFER help diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 8a0eb4a04fb5..7b40925dd4ff 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -182,7 +182,7 @@ struct at91_adc_caps { u8 ts_pen_detect_sensitivity; /* startup time calculate function */ - u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz); + u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz); u8 num_channels; struct at91_adc_reg_desc registers; @@ -201,7 +201,7 @@ struct at91_adc_state { u8 num_channels; void __iomem *reg_base; struct at91_adc_reg_desc *registers; - u8 startup_time; + u32 startup_time; u8 sample_hold_time; bool sleep_mode; struct iio_trigger **trig; @@ -779,7 +779,7 @@ ret: return ret; } -static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz) +static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz) { /* * Number of ticks needed to cover the startup time of the ADC @@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz) return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8; } -static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz) +static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz) { /* * For sama5d3x and at91sam9x5, the formula changes to: diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index 8d4e019ea4ca..9c311c1e1ac7 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = { }; module_platform_driver(rockchip_saradc_driver); + 
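/*
 * A minimal sketch of the trimming rule the ttm_page_alloc_dma hunk above
 * introduces, with invented names and an arbitrary batch size: pages are
 * only handed back once the free list has grown NUM_PAGES_TO_ALLOC entries
 * past the configured maximum, and the pool is then trimmed back to that
 * maximum, so set_memory_wb() runs in batches rather than per page.
 */
#include <stddef.h>

#define NUM_PAGES_TO_ALLOC 64   /* batch size; the driver defines its own value */

static size_t pages_to_trim(size_t npages_free, size_t max_size)
{
        if (npages_free >= max_size + NUM_PAGES_TO_ALLOC)
                return npages_free - max_size;  /* trim back to max_size */
        return 0;                               /* below threshold: keep all */
}

/*
 * Example: with max_size = 512, nothing is freed until the free list
 * reaches 576 pages, and then 64 (or more) pages are freed in one pass.
 */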
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); +MODULE_DESCRIPTION("Rockchip SARADC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c index 06f4792240f0..ebe415f10640 100644 --- a/drivers/iio/adc/twl4030-madc.c +++ b/drivers/iio/adc/twl4030-madc.c @@ -833,7 +833,8 @@ static int twl4030_madc_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, twl4030_madc_threaded_irq_handler, - IRQF_TRIGGER_RISING, "twl4030_madc", madc); + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "twl4030_madc", madc); if (ret) { dev_err(&pdev->dev, "could not request irq\n"); goto err_i2c; diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index 610fc98f88ef..595511022795 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) s32 poll_value = 0; if (state) { + if (!atomic_read(&st->user_requested_state)) + return 0; if (sensor_hub_device_open(st->hsdev)) return -EIO; @@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) poll_value = hid_sensor_read_poll_value(st); } else { - if (!atomic_dec_and_test(&st->data_ready)) + int val; + + val = atomic_dec_if_positive(&st->data_ready); + if (val < 0) return 0; + sensor_hub_device_close(st->hsdev); state_val = hid_sensor_get_usage_index(st->hsdev, st->power_state.report_id, @@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state); int hid_sensor_power_state(struct hid_sensor_common *st, bool state) { + #ifdef CONFIG_PM int ret; + atomic_set(&st->user_requested_state, state); if (state) ret = pm_runtime_get_sync(&st->pdev->dev); else { @@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) return 0; #else + atomic_set(&st->user_requested_state, state); return _hid_sensor_power_state(st, state); #endif } diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index 61bb9d4239ea..e98428df0d44 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -22,7 +22,7 @@ #include "ad5624r.h" static int ad5624r_spi_write(struct spi_device *spi, - u8 cmd, u8 addr, u16 val, u8 len) + u8 cmd, u8 addr, u16 val, u8 shift) { u32 data; u8 msg[3]; @@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi, * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits, * for the AD5664R, AD5644R, and AD5624R, respectively. 
*/ - data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len)); + data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift); msg[0] = data >> 16; msg[1] = data >> 8; msg[2] = data; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 17d4bb15be4d..65ce86837177 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val) return -EINVAL; } +static int inv_write_raw_get_fmt(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_ANGL_VEL: + return IIO_VAL_INT_PLUS_NANO; + default: + return IIO_VAL_INT_PLUS_MICRO; + } + default: + return IIO_VAL_INT_PLUS_MICRO; + } + + return -EINVAL; +} static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) { int result, i; @@ -696,6 +713,7 @@ static const struct iio_info mpu_info = { .driver_module = THIS_MODULE, .read_raw = &inv_mpu6050_read_raw, .write_raw = &inv_mpu6050_write_raw, + .write_raw_get_fmt = &inv_write_raw_get_fmt, .attrs = &inv_attribute_group, .validate_trigger = inv_mpu6050_validate_trigger, }; diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index e6198b7c9cbf..a5c59251ec0e 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -188,6 +188,7 @@ config SENSORS_LM3533 config LTR501 tristate "LTR-501ALS-01 light sensor" depends on I2C + select REGMAP_I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER help @@ -201,6 +202,7 @@ config LTR501 config STK3310 tristate "STK3310 ALS and proximity sensor" depends on I2C + select REGMAP_I2C help Say yes here to get support for the Sensortek STK3310 ambient light and proximity sensor. 
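/*
 * A standalone sketch of the 24-bit word the ad5624r change above builds,
 * assuming nothing beyond the hunk and its comment: two don't-care bits, a
 * 3-bit command, a 3-bit address, and the data code left-justified in the
 * low 16 bits, with shift = 0, 2 or 4 for the 16-, 14- and 12-bit parts.
 * Passing the shift directly is what the rename from len to shift reflects.
 */
#include <stdint.h>

static void ad5624r_pack(uint8_t cmd, uint8_t addr, uint16_t val,
                         uint8_t shift, uint8_t msg[3])
{
        uint32_t data = ((uint32_t)cmd << 19) | ((uint32_t)addr << 16) |
                        ((uint32_t)val << shift);

        msg[0] = (uint8_t)(data >> 16); /* most significant byte first on SPI */
        msg[1] = (uint8_t)(data >> 8);
        msg[2] = (uint8_t)data;
}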
The STK3311 model is also supported by this diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c index 869033e48a1f..a1d4905cc9d2 100644 --- a/drivers/iio/light/cm3323.c +++ b/drivers/iio/light/cm3323.c @@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2) for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) { if (val == cm3323_int_time[i].val && val2 == cm3323_int_time[i].val2) { - reg_conf = data->reg_conf; + reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK; reg_conf |= i << CM3323_CONF_IT_SHIFT; ret = i2c_smbus_write_word_data(data->client, diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 1ef7d3773ab9..b5a0e66b5f28 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1302,7 +1302,7 @@ static int ltr501_init(struct ltr501_data *data) if (ret < 0) return ret; - data->als_contr = ret | data->chip_info->als_mode_active; + data->als_contr = status | data->chip_info->als_mode_active; ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status); if (ret < 0) diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index fee4297d7c8f..c1a218236be5 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -43,7 +43,6 @@ #define STK3311_CHIP_ID_VAL 0x1D #define STK3310_PSINT_EN 0x01 #define STK3310_PS_MAX_VAL 0xFFFF -#define STK3310_THRESH_MAX 0xFFFF #define STK3310_DRIVER_NAME "stk3310" #define STK3310_REGMAP_NAME "stk3310_regmap" @@ -84,15 +83,13 @@ static const struct reg_field stk3310_reg_field_flag_psint = REG_FIELD(STK3310_REG_FLAG, 4, 4); static const struct reg_field stk3310_reg_field_flag_nf = REG_FIELD(STK3310_REG_FLAG, 0, 0); -/* - * Maximum PS values with regard to scale. Used to export the 'inverse' - * PS value (high values for far objects, low values for near objects). - */ + +/* Estimate maximum proximity values with regard to measurement scale. */ static const int stk3310_ps_max[4] = { - STK3310_PS_MAX_VAL / 64, - STK3310_PS_MAX_VAL / 16, - STK3310_PS_MAX_VAL / 4, - STK3310_PS_MAX_VAL, + STK3310_PS_MAX_VAL / 640, + STK3310_PS_MAX_VAL / 160, + STK3310_PS_MAX_VAL / 40, + STK3310_PS_MAX_VAL / 10 }; static const int stk3310_scale_table[][2] = { @@ -128,14 +125,14 @@ static const struct iio_event_spec stk3310_events[] = { /* Proximity event */ { .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_FALLING, + .dir = IIO_EV_DIR_RISING, .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), }, /* Out-of-proximity event */ { .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_RISING, + .dir = IIO_EV_DIR_FALLING, .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), }, @@ -205,23 +202,16 @@ static int stk3310_read_event(struct iio_dev *indio_dev, u8 reg; u16 buf; int ret; - unsigned int index; struct stk3310_data *data = iio_priv(indio_dev); if (info != IIO_EV_INFO_VALUE) return -EINVAL; - /* - * Only proximity interrupts are implemented at the moment. - * Since we're inverting proximity values, the sensor's 'high' - * threshold will become our 'low' threshold, associated with - * 'near' events. Similarly, the sensor's 'low' threshold will - * be our 'high' threshold, associated with 'far' events. - */ + /* Only proximity interrupts are implemented at the moment. 
*/ if (dir == IIO_EV_DIR_RISING) - reg = STK3310_REG_THDL_PS; - else if (dir == IIO_EV_DIR_FALLING) reg = STK3310_REG_THDH_PS; + else if (dir == IIO_EV_DIR_FALLING) + reg = STK3310_REG_THDL_PS; else return -EINVAL; @@ -232,8 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev, dev_err(&data->client->dev, "register read failed\n"); return ret; } - regmap_field_read(data->reg_ps_gain, &index); - *val = swab16(stk3310_ps_max[index] - buf); + *val = swab16(buf); return IIO_VAL_INT; } @@ -257,13 +246,13 @@ static int stk3310_write_event(struct iio_dev *indio_dev, return -EINVAL; if (dir == IIO_EV_DIR_RISING) - reg = STK3310_REG_THDL_PS; - else if (dir == IIO_EV_DIR_FALLING) reg = STK3310_REG_THDH_PS; + else if (dir == IIO_EV_DIR_FALLING) + reg = STK3310_REG_THDL_PS; else return -EINVAL; - buf = swab16(stk3310_ps_max[index] - val); + buf = swab16(val); ret = regmap_bulk_write(data->regmap, reg, &buf, 2); if (ret < 0) dev_err(&client->dev, "failed to set PS threshold!\n"); @@ -334,14 +323,6 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, return ret; } *val = swab16(buf); - if (chan->type == IIO_PROXIMITY) { - /* - * Invert the proximity data so we return low values - * for close objects and high values for far ones. - */ - regmap_field_read(data->reg_ps_gain, &index); - *val = stk3310_ps_max[index] - *val; - } mutex_unlock(&data->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_INT_TIME: @@ -581,8 +562,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private) } event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, IIO_EV_TYPE_THRESH, - (dir ? IIO_EV_DIR_RISING : - IIO_EV_DIR_FALLING)); + (dir ? IIO_EV_DIR_FALLING : + IIO_EV_DIR_RISING)); iio_push_event(indio_dev, event, data->timestamp); /* Reset the interrupt flag */ diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c index 71c2bde275aa..f8b1df018abe 100644 --- a/drivers/iio/light/tcs3414.c +++ b/drivers/iio/light/tcs3414.c @@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev, if (val != 0) return -EINVAL; for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) { - if (val == tcs3414_times[i] * 1000) { + if (val2 == tcs3414_times[i] * 1000) { data->timing &= ~TCS3414_INTEG_MASK; data->timing |= i; return i2c_smbus_write_byte_data( diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c index 7a2ea71c659a..d927397a6ef7 100644 --- a/drivers/iio/magnetometer/mmc35240.c +++ b/drivers/iio/magnetometer/mmc35240.c @@ -84,10 +84,10 @@ #define MMC35240_OTP_START_ADDR 0x1B enum mmc35240_resolution { - MMC35240_16_BITS_SLOW = 0, /* 100 Hz */ - MMC35240_16_BITS_FAST, /* 200 Hz */ - MMC35240_14_BITS, /* 333 Hz */ - MMC35240_12_BITS, /* 666 Hz */ + MMC35240_16_BITS_SLOW = 0, /* 7.92 ms */ + MMC35240_16_BITS_FAST, /* 4.08 ms */ + MMC35240_14_BITS, /* 2.16 ms */ + MMC35240_12_BITS, /* 1.20 ms */ }; enum mmc35240_axis { @@ -100,22 +100,22 @@ static const struct { int sens[3]; /* sensitivity per X, Y, Z axis */ int nfo; /* null field output */ } mmc35240_props_table[] = { - /* 16 bits, 100Hz ODR */ + /* 16 bits, 125Hz ODR */ { {1024, 1024, 1024}, 32768, }, - /* 16 bits, 200Hz ODR */ + /* 16 bits, 250Hz ODR */ { {1024, 1024, 770}, 32768, }, - /* 14 bits, 333Hz ODR */ + /* 14 bits, 450Hz ODR */ { {256, 256, 193}, 8192, }, - /* 12 bits, 666Hz ODR */ + /* 12 bits, 800Hz ODR */ { {64, 64, 48}, 2048, @@ -133,9 +133,15 @@ struct mmc35240_data { int axis_scale[3]; }; -static const int mmc35240_samp_freq[] = {100, 200, 333, 666}; +static const struct { + int val; + int val2; +} 
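/*
 * A small sketch of the threshold handling after the stk3310 changes
 * above, using stand-in names: with the proximity inversion removed, a
 * rising (near) event maps to the high-threshold register and a falling
 * (far) event to the low-threshold register, and the 16-bit value is still
 * byte-swapped (swab16() in the driver) to match the register byte order
 * before the bulk register write.
 */
#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for STK3310_REG_THDH_PS / STK3310_REG_THDL_PS. */
enum thresh_reg { THRESH_HIGH_PS, THRESH_LOW_PS };

static enum thresh_reg pick_thresh_reg(bool rising)
{
        return rising ? THRESH_HIGH_PS : THRESH_LOW_PS;
}

static uint16_t to_reg_order(uint16_t cpu_val)
{
        return (uint16_t)((cpu_val << 8) | (cpu_val >> 8));     /* swab16() */
}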
mmc35240_samp_freq[] = { {1, 500000}, + {13, 0}, + {25, 0}, + {50, 0} }; -static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 333 666"); +static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1.5 13 25 50"); #define MMC35240_CHANNEL(_axis) { \ .type = IIO_MAGN, \ @@ -168,7 +174,8 @@ static int mmc35240_get_samp_freq_index(struct mmc35240_data *data, int i; for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++) - if (mmc35240_samp_freq[i] == val) + if (mmc35240_samp_freq[i].val == val && + mmc35240_samp_freq[i].val2 == val2) return i; return -EINVAL; } @@ -378,9 +385,9 @@ static int mmc35240_read_raw(struct iio_dev *indio_dev, if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq)) return -EINVAL; - *val = mmc35240_samp_freq[i]; - *val2 = 0; - return IIO_VAL_INT; + *val = mmc35240_samp_freq[i].val; + *val2 = mmc35240_samp_freq[i].val2; + return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; } diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c index 2042e375f835..3d756bd8c703 100644 --- a/drivers/iio/proximity/sx9500.c +++ b/drivers/iio/proximity/sx9500.c @@ -80,6 +80,7 @@ #define SX9500_COMPSTAT_MASK GENMASK(3, 0) #define SX9500_NUM_CHANNELS 4 +#define SX9500_CHAN_MASK GENMASK(SX9500_NUM_CHANNELS - 1, 0) struct sx9500_data { struct mutex mutex; @@ -281,7 +282,7 @@ static int sx9500_read_prox_data(struct sx9500_data *data, if (ret < 0) return ret; - *val = 32767 - (s16)be16_to_cpu(regval); + *val = be16_to_cpu(regval); return IIO_VAL_INT; } @@ -329,27 +330,29 @@ static int sx9500_read_proximity(struct sx9500_data *data, else ret = sx9500_wait_for_sample(data); - if (ret < 0) - return ret; - mutex_lock(&data->mutex); - ret = sx9500_read_prox_data(data, chan, val); if (ret < 0) - goto out; + goto out_dec_data_rdy; - ret = sx9500_dec_chan_users(data, chan->channel); + ret = sx9500_read_prox_data(data, chan, val); if (ret < 0) - goto out; + goto out_dec_data_rdy; ret = sx9500_dec_data_rdy_users(data); if (ret < 0) + goto out_dec_chan; + + ret = sx9500_dec_chan_users(data, chan->channel); + if (ret < 0) goto out; ret = IIO_VAL_INT; goto out; +out_dec_data_rdy: + sx9500_dec_data_rdy_users(data); out_dec_chan: sx9500_dec_chan_users(data, chan->channel); out: @@ -679,7 +682,7 @@ out: static int sx9500_buffer_preenable(struct iio_dev *indio_dev) { struct sx9500_data *data = iio_priv(indio_dev); - int ret, i; + int ret = 0, i; mutex_lock(&data->mutex); @@ -703,7 +706,7 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev) static int sx9500_buffer_predisable(struct iio_dev *indio_dev) { struct sx9500_data *data = iio_priv(indio_dev); - int ret, i; + int ret = 0, i; iio_triggered_buffer_predisable(indio_dev); @@ -800,8 +803,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev) unsigned int val; ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, - GENMASK(SX9500_NUM_CHANNELS, 0), - GENMASK(SX9500_NUM_CHANNELS, 0)); + SX9500_CHAN_MASK, SX9500_CHAN_MASK); if (ret < 0) return ret; @@ -821,7 +823,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev) out: regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, - GENMASK(SX9500_NUM_CHANNELS, 0), 0); + SX9500_CHAN_MASK, 0); return ret; } diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c index fcc49f89b946..8f21f32f9739 100644 --- a/drivers/iio/temperature/tmp006.c +++ b/drivers/iio/temperature/tmp006.c @@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev, struct tmp006_data *data = iio_priv(indio_dev); int i; + if (mask != IIO_CHAN_INFO_SAMP_FREQ) + return 
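/*
 * A runnable sketch of the integer-plus-micro pairs the mmc35240 hunk
 * above switches to: 1.5 Hz is stored as { 1, 500000 } and reported as
 * IIO_VAL_INT_PLUS_MICRO, so looking up a requested rate has to match both
 * the integer and the fractional field.
 */
#include <stdio.h>

static const struct { int val; int val2; } samp_freq[] = {
        { 1, 500000 }, { 13, 0 }, { 25, 0 }, { 50, 0 },  /* 1.5 13 25 50 Hz */
};

static int samp_freq_index(int val, int val2)
{
        unsigned int i;

        for (i = 0; i < sizeof(samp_freq) / sizeof(samp_freq[0]); i++)
                if (samp_freq[i].val == val && samp_freq[i].val2 == val2)
                        return (int)i;
        return -1;      /* the driver returns -EINVAL here */
}

int main(void)
{
        printf("%d %d\n", samp_freq_index(1, 500000), samp_freq_index(25, 0));
        return 0;       /* prints "0 2" */
}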
-EINVAL; + for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++) if ((val == tmp006_freqs[i][0]) && (val2 == tmp006_freqs[i][1])) { diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index c7dcfe4ca5f1..0429040304fd 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh * struct ib_ah *ah; struct ib_mad_send_wr_private *mad_send_wr; - if (device->node_type == RDMA_NODE_IB_SWITCH) + if (rdma_cap_ib_switch(device)) port_priv = ib_get_agent_port(device, 0); else port_priv = ib_get_agent_port(device, port_num); @@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh * memcpy(send_buf->mad, mad_hdr, resp_mad_len); send_buf->ah = ah; - if (device->node_type == RDMA_NODE_IB_SWITCH) { + if (rdma_cap_ib_switch(device)) { mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index dbddddd6fb5d..3a972ebf3c0d 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -169,6 +169,7 @@ struct cm_device { struct ib_device *ib_device; struct device *device; u8 ack_delay; + int going_down; struct cm_port *port[0]; }; @@ -805,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv) { int wait_time; unsigned long flags; + struct cm_device *cm_dev; + + cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client); + if (!cm_dev) + return; spin_lock_irqsave(&cm.lock, flags); cm_cleanup_timewait(cm_id_priv->timewait_info); @@ -818,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv) */ cm_id_priv->id.state = IB_CM_TIMEWAIT; wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); - queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, - msecs_to_jiffies(wait_time)); + + /* Check if the device started its remove_one */ + spin_lock_irq(&cm.lock); + if (!cm_dev->going_down) + queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, + msecs_to_jiffies(wait_time)); + spin_unlock_irq(&cm.lock); + cm_id_priv->timewait_info = NULL; } @@ -3305,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id) struct cm_work *work; unsigned long flags; int ret = 0; + struct cm_device *cm_dev; + + cm_dev = ib_get_client_data(cm_id->device, &cm_client); + if (!cm_dev) + return -ENODEV; work = kmalloc(sizeof *work, GFP_ATOMIC); if (!work) @@ -3343,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id) work->remote_id = cm_id->remote_id; work->mad_recv_wc = NULL; work->cm_event.event = IB_CM_USER_ESTABLISHED; - queue_delayed_work(cm.wq, &work->work, 0); + + /* Check if the device started its remove_one */ + spin_lock_irq(&cm.lock); + if (!cm_dev->going_down) { + queue_delayed_work(cm.wq, &work->work, 0); + } else { + kfree(work); + ret = -ENODEV; + } + spin_unlock_irq(&cm.lock); + out: return ret; } @@ -3394,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, enum ib_cm_event_type event; u16 attr_id; int paths = 0; + int going_down = 0; switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { case CM_REQ_ATTR_ID: @@ -3452,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, work->cm_event.event = event; work->mad_recv_wc = mad_recv_wc; work->port = port; - queue_delayed_work(cm.wq, &work->work, 0); + + /* Check if the device started its remove_one */ + spin_lock_irq(&cm.lock); + if (!port->cm_dev->going_down) + 
queue_delayed_work(cm.wq, &work->work, 0); + else + going_down = 1; + spin_unlock_irq(&cm.lock); + + if (going_down) { + kfree(work); + ib_free_recv_mad(mad_recv_wc); + } } static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, @@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device) cm_dev->ib_device = ib_device; cm_get_ack_delay(cm_dev); - + cm_dev->going_down = 0; cm_dev->device = device_create(&cm_class, &ib_device->dev, MKDEV(0, 0), NULL, "%s", ib_device->name); @@ -3864,14 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device) list_del(&cm_dev->list); write_unlock_irqrestore(&cm.device_lock, flags); + spin_lock_irq(&cm.lock); + cm_dev->going_down = 1; + spin_unlock_irq(&cm.lock); + for (i = 1; i <= ib_device->phys_port_cnt; i++) { if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); - ib_unregister_mad_agent(port->mad_agent); + /* + * We flush the queue here after the going_down set, this + * verify that no new works will be queued in the recv handler, + * after that we can call the unregister_mad_agent + */ flush_workqueue(cm.wq); + ib_unregister_mad_agent(port->mad_agent); cm_remove_port_fs(port); } device_unregister(cm_dev->device); diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index e6ffa2e66c1a..22a3abee2a54 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c @@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client) err_str = "Invalid port mapper client"; goto pid_query_error; } - if (iwpm_registered_client(nl_client)) + if (iwpm_check_registration(nl_client, IWPM_REG_VALID) || + iwpm_user_pid == IWPM_PID_UNAVAILABLE) return 0; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client); if (!skb) { @@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client) ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL); if (ret) { skb = NULL; /* skb is freed in the netlink send-op handling */ - iwpm_set_registered(nl_client, 1); iwpm_user_pid = IWPM_PID_UNAVAILABLE; err_str = "Unable to send a nlmsg"; goto pid_query_error; @@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) err_str = "Invalid port mapper client"; goto add_mapping_error; } - if (!iwpm_registered_client(nl_client)) { + if (!iwpm_valid_pid()) + return 0; + if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) { err_str = "Unregistered port mapper client"; goto add_mapping_error; } - if (!iwpm_valid_pid()) - return 0; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client); if (!skb) { err_str = "Unable to create a nlmsg"; @@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) err_str = "Invalid port mapper client"; goto query_mapping_error; } - if (!iwpm_registered_client(nl_client)) { + if (!iwpm_valid_pid()) + return 0; + if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) { err_str = "Unregistered port mapper client"; goto query_mapping_error; } - if (!iwpm_valid_pid()) - return 0; ret = -ENOMEM; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client); if (!skb) { @@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client) err_str = "Invalid port mapper client"; goto remove_mapping_error; } - if (!iwpm_registered_client(nl_client)) { + if (!iwpm_valid_pid()) + return 0; + if (iwpm_check_registration(nl_client, 
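/*
 * A minimal userspace sketch (pthreads, invented names) of the shutdown
 * ordering the cm.c hunks above establish: the event paths test a
 * going_down flag under the same lock that removal takes, and removal sets
 * the flag first, flushes the already-queued work second, and unregisters
 * the event source last, so no new work can be queued after the flush.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool going_down;

/* Event path: queue only while the device is alive; on failure the caller
 * frees the work item and receive buffer, as the hunks above do. */
static bool try_queue_work(void)
{
        bool queued;

        pthread_mutex_lock(&lock);
        queued = !going_down;
        if (queued) {
                /* queue_delayed_work(cm.wq, &work->work, 0) in the driver */
        }
        pthread_mutex_unlock(&lock);
        return queued;
}

/* Removal path: flag, then flush, then unregister. */
static void remove_one(void (*flush_queue)(void), void (*unregister_src)(void))
{
        pthread_mutex_lock(&lock);
        going_down = true;
        pthread_mutex_unlock(&lock);

        flush_queue();          /* drains work queued before the flag was set */
        unregister_src();       /* nothing new can be queued past this point  */
}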
IWPM_REG_UNDEF)) { err_str = "Unregistered port mapper client"; goto remove_mapping_error; } - if (!iwpm_valid_pid()) - return 0; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client); if (!skb) { ret = -ENOMEM; @@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb) pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", __func__, iwpm_user_pid); if (iwpm_valid_client(nl_client)) - iwpm_set_registered(nl_client, 1); + iwpm_set_registration(nl_client, IWPM_REG_VALID); register_pid_response_exit: nlmsg_request->request_done = 1; /* always for found nlmsg_request */ @@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX]; const char *msg_type = "Mapping Info response"; - int iwpm_pid; u8 nl_client; char *iwpm_name; u16 iwpm_version; @@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb) __func__, nl_client); return ret; } - iwpm_set_registered(nl_client, 0); + iwpm_set_registration(nl_client, IWPM_REG_INCOMPL); atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); + iwpm_user_pid = cb->nlh->nlmsg_pid; if (!iwpm_mapinfo_available()) return 0; - iwpm_pid = cb->nlh->nlmsg_pid; pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", - __func__, iwpm_pid); - ret = iwpm_send_mapinfo(nl_client, iwpm_pid); + __func__, iwpm_user_pid); + ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid); return ret; } EXPORT_SYMBOL(iwpm_mapping_info_cb); diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index a626795bf9c7..5fb089e91353 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -78,6 +78,7 @@ init_exit: mutex_unlock(&iwpm_admin_lock); if (!ret) { iwpm_set_valid(nl_client, 1); + iwpm_set_registration(nl_client, IWPM_REG_UNDEF); pr_debug("%s: Mapinfo and reminfo tables are created\n", __func__); } @@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client) } mutex_unlock(&iwpm_admin_lock); iwpm_set_valid(nl_client, 0); + iwpm_set_registration(nl_client, IWPM_REG_UNDEF); return 0; } EXPORT_SYMBOL(iwpm_exit); @@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid) } /* valid client */ -int iwpm_registered_client(u8 nl_client) +u32 iwpm_get_registration(u8 nl_client) { return iwpm_admin.reg_list[nl_client]; } /* valid client */ -void iwpm_set_registered(u8 nl_client, int reg) +void iwpm_set_registration(u8 nl_client, u32 reg) { iwpm_admin.reg_list[nl_client] = reg; } +/* valid client */ +u32 iwpm_check_registration(u8 nl_client, u32 reg) +{ + return (iwpm_get_registration(nl_client) & reg); +} + int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr, struct sockaddr_storage *b_sockaddr) { diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h index ee2d9ff095be..b7b9e194ce81 100644 --- a/drivers/infiniband/core/iwpm_util.h +++ b/drivers/infiniband/core/iwpm_util.h @@ -58,6 +58,10 @@ #define IWPM_PID_UNDEFINED -1 #define IWPM_PID_UNAVAILABLE -2 +#define IWPM_REG_UNDEF 0x01 +#define IWPM_REG_VALID 0x02 +#define IWPM_REG_INCOMPL 0x04 + struct iwpm_nlmsg_request { struct list_head inprocess_list; __u32 nlmsg_seq; @@ -88,7 +92,7 @@ struct iwpm_admin_data { atomic_t refcount; atomic_t nlmsg_seq; int client_list[RDMA_NL_NUM_CLIENTS]; - int reg_list[RDMA_NL_NUM_CLIENTS]; + u32 reg_list[RDMA_NL_NUM_CLIENTS]; }; /** @@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client); void iwpm_set_valid(u8 nl_client, int valid); /** 
- * iwpm_registered_client - Check if the port mapper client is registered + * iwpm_check_registration - Check if the client registration + * matches the given one * @nl_client: The index of the netlink client + * @reg: The given registration type to compare with * * Call iwpm_register_pid() to register a client + * Returns true if the client registration matches reg, + * otherwise returns false + */ +u32 iwpm_check_registration(u8 nl_client, u32 reg); + +/** + * iwpm_set_registration - Set the client registration + * @nl_client: The index of the netlink client + * @reg: Registration type to set */ -int iwpm_registered_client(u8 nl_client); +void iwpm_set_registration(u8 nl_client, u32 reg); /** - * iwpm_set_registered - Set the port mapper client to registered or not + * iwpm_get_registration * @nl_client: The index of the netlink client - * @reg: 1 if registered or 0 if not + * + * Returns the client registration type */ -void iwpm_set_registered(u8 nl_client, int reg); +u32 iwpm_get_registration(u8 nl_client); /** * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index a4b1466c1bf6..786fc51bf04b 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, mad_agent_priv->qp_info->port_priv->port_num); - if (device->node_type == RDMA_NODE_IB_SWITCH && + if (rdma_cap_ib_switch(device) && smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) port_num = send_wr->wr.ud.port_num; else @@ -787,14 +787,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, if ((opa_get_smp_direction(opa_smp) ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == OPA_LID_PERMISSIVE && - opa_smi_handle_dr_smp_send(opa_smp, device->node_type, + opa_smi_handle_dr_smp_send(opa_smp, + rdma_cap_ib_switch(device), port_num) == IB_SMI_DISCARD) { ret = -EINVAL; dev_err(&device->dev, "OPA Invalid directed route\n"); goto out; } opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); - if (opa_drslid != OPA_LID_PERMISSIVE && + if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) && opa_drslid & 0xffff0000) { ret = -EINVAL; dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", @@ -810,7 +811,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, } else { if ((ib_get_smp_direction(smp) ? 
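/*
 * A short sketch of the registration states introduced above: instead of a
 * 0/1 flag, each client stores one of IWPM_REG_UNDEF, IWPM_REG_VALID or
 * IWPM_REG_INCOMPL, and iwpm_check_registration() tests with a bitwise
 * AND, so a caller can match a single state or a union of states. The
 * constants mirror the defines in the diff; the array size is arbitrary.
 */
#include <stdbool.h>
#include <stdint.h>

#define REG_UNDEF   0x01u
#define REG_VALID   0x02u
#define REG_INCOMPL 0x04u

static uint32_t reg_state[8];   /* reg_list[RDMA_NL_NUM_CLIENTS] in the driver */

static bool check_registration(unsigned int client, uint32_t want)
{
        return (reg_state[client] & want) != 0;
}

/*
 * After a mapping-info request the state becomes REG_INCOMPL, so a check
 * for REG_VALID fails and the client registers again, while a check for
 * REG_VALID | REG_INCOMPL would still pass.
 */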
smp->dr_dlid : smp->dr_slid) == IB_LID_PERMISSIVE && - smi_handle_dr_smp_send(smp, device->node_type, port_num) == + smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) == IB_SMI_DISCARD) { ret = -EINVAL; dev_err(&device->dev, "Invalid directed route\n"); @@ -2030,7 +2031,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv struct ib_smp *smp = (struct ib_smp *)recv->mad; if (smi_handle_dr_smp_recv(smp, - port_priv->device->node_type, + rdma_cap_ib_switch(port_priv->device), port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD) @@ -2042,13 +2043,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv if (retsmi == IB_SMI_SEND) { /* don't forward */ if (smi_handle_dr_smp_send(smp, - port_priv->device->node_type, + rdma_cap_ib_switch(port_priv->device), port_num) == IB_SMI_DISCARD) return IB_SMI_DISCARD; if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) return IB_SMI_DISCARD; - } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { + } else if (rdma_cap_ib_switch(port_priv->device)) { /* forward case for switches */ memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; @@ -2115,7 +2116,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv, struct opa_smp *smp = (struct opa_smp *)recv->mad; if (opa_smi_handle_dr_smp_recv(smp, - port_priv->device->node_type, + rdma_cap_ib_switch(port_priv->device), port_num, port_priv->device->phys_port_cnt) == IB_SMI_DISCARD) @@ -2127,7 +2128,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv, if (retsmi == IB_SMI_SEND) { /* don't forward */ if (opa_smi_handle_dr_smp_send(smp, - port_priv->device->node_type, + rdma_cap_ib_switch(port_priv->device), port_num) == IB_SMI_DISCARD) return IB_SMI_DISCARD; @@ -2135,7 +2136,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv, IB_SMI_DISCARD) return IB_SMI_DISCARD; - } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { + } else if (rdma_cap_ib_switch(port_priv->device)) { /* forward case for switches */ memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; @@ -2235,7 +2236,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, goto out; } - if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) + if (rdma_cap_ib_switch(port_priv->device)) port_num = wc->port_num; else port_num = port_priv->port_num; @@ -3297,17 +3298,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) static void ib_mad_init_device(struct ib_device *device) { - int start, end, i; + int start, i; - if (device->node_type == RDMA_NODE_IB_SWITCH) { - start = 0; - end = 0; - } else { - start = 1; - end = device->phys_port_cnt; - } + start = rdma_start_port(device); - for (i = start; i <= end; i++) { + for (i = start; i <= rdma_end_port(device); i++) { if (!rdma_cap_ib_mad(device, i)) continue; @@ -3342,17 +3337,9 @@ error: static void ib_mad_remove_device(struct ib_device *device) { - int start, end, i; - - if (device->node_type == RDMA_NODE_IB_SWITCH) { - start = 0; - end = 0; - } else { - start = 1; - end = device->phys_port_cnt; - } + int i; - for (i = start; i <= end; i++) { + for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { if (!rdma_cap_ib_mad(device, i)) continue; diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 1244f02a5c6d..2cb865c7ce7a 100644 --- a/drivers/infiniband/core/multicast.c +++ 
b/drivers/infiniband/core/multicast.c @@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device) if (!dev) return; - if (device->node_type == RDMA_NODE_IB_SWITCH) - dev->start_port = dev->end_port = 0; - else { - dev->start_port = 1; - dev->end_port = device->phys_port_cnt; - } + dev->start_port = rdma_start_port(device); + dev->end_port = rdma_end_port(device); for (i = 0; i <= dev->end_port - dev->start_port; i++) { if (!rdma_cap_ib_mcast(device, dev->start_port + i)) diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h index 62d91bfa4cb7..3bfab3505a29 100644 --- a/drivers/infiniband/core/opa_smi.h +++ b/drivers/infiniband/core/opa_smi.h @@ -39,12 +39,12 @@ #include "smi.h" -enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, +enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, int port_num, int phys_port_cnt); int opa_smi_get_fwd_port(struct opa_smp *smp); extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp); extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, - u8 node_type, int port_num); + bool is_switch, int port_num); /* * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 0fae85062a65..ca919f429666 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1156,12 +1156,8 @@ static void ib_sa_add_one(struct ib_device *device) int s, e, i; int count = 0; - if (device->node_type == RDMA_NODE_IB_SWITCH) - s = e = 0; - else { - s = 1; - e = device->phys_port_cnt; - } + s = rdma_start_port(device); + e = rdma_end_port(device); sa_dev = kzalloc(sizeof *sa_dev + (e - s + 1) * sizeof (struct ib_sa_port), diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 368a561d1a5d..f19b23817c2b 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -41,7 +41,7 @@ #include "smi.h" #include "opa_smi.h" -static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, +static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, const u8 *return_path, @@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, /* C14-9:2 */ if (*hop_ptr && *hop_ptr < hop_cnt) { - if (node_type != RDMA_NODE_IB_SWITCH) + if (!is_switch) return IB_SMI_DISCARD; /* return_path set when received */ @@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, if (*hop_ptr == hop_cnt) { /* return_path set when received */ (*hop_ptr)++; - return (node_type == RDMA_NODE_IB_SWITCH || + return (is_switch || dr_dlid_is_permissive ? IB_SMI_HANDLE : IB_SMI_DISCARD); } @@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, /* C14-13:2 */ if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { - if (node_type != RDMA_NODE_IB_SWITCH) + if (!is_switch) return IB_SMI_DISCARD; (*hop_ptr)--; @@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, if (*hop_ptr == 1) { (*hop_ptr)--; /* C14-13:3 -- SMPs destined for SM shouldn't be here */ - return (node_type == RDMA_NODE_IB_SWITCH || + return (is_switch || dr_slid_is_permissive ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); } @@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, * Return IB_SMI_DISCARD if the SMP should be discarded */ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, - u8 node_type, int port_num) + bool is_switch, int port_num) { - return __smi_handle_dr_smp_send(node_type, port_num, + return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, smp->initial_path, smp->return_path, @@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, } enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, - u8 node_type, int port_num) + bool is_switch, int port_num) { - return __smi_handle_dr_smp_send(node_type, port_num, + return __smi_handle_dr_smp_send(is_switch, port_num, &smp->hop_ptr, smp->hop_cnt, smp->route.dr.initial_path, smp->route.dr.return_path, @@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, OPA_LID_PERMISSIVE); } -static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, +static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num, int phys_port_cnt, u8 *hop_ptr, u8 hop_cnt, const u8 *initial_path, @@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, /* C14-9:2 -- intermediate hop */ if (*hop_ptr && *hop_ptr < hop_cnt) { - if (node_type != RDMA_NODE_IB_SWITCH) + if (!is_switch) return IB_SMI_DISCARD; return_path[*hop_ptr] = port_num; @@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, return_path[*hop_ptr] = port_num; /* hop_ptr updated when sending */ - return (node_type == RDMA_NODE_IB_SWITCH || + return (is_switch || dr_dlid_is_permissive ? IB_SMI_HANDLE : IB_SMI_DISCARD); } @@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, /* C14-13:2 */ if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { - if (node_type != RDMA_NODE_IB_SWITCH) + if (!is_switch) return IB_SMI_DISCARD; /* hop_ptr updated when sending */ @@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, return IB_SMI_HANDLE; } /* hop_ptr updated when sending */ - return (node_type == RDMA_NODE_IB_SWITCH ? - IB_SMI_HANDLE : IB_SMI_DISCARD); + return (is_switch ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:4 -- hop_ptr = 0 -> give to SM */ @@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, * Adjust information for a received SMP * Return IB_SMI_DISCARD if the SMP should be dropped */ -enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, +enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, int port_num, int phys_port_cnt) { - return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, + return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, smp->initial_path, smp->return_path, @@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, * Adjust information for a received SMP * Return IB_SMI_DISCARD if the SMP should be dropped */ -enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, +enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch, int port_num, int phys_port_cnt) { - return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, + return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt, &smp->hop_ptr, smp->hop_cnt, smp->route.dr.initial_path, smp->route.dr.return_path, diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h index aff96bac49b4..33c91c8a16e9 100644 --- a/drivers/infiniband/core/smi.h +++ b/drivers/infiniband/core/smi.h @@ -51,12 +51,12 @@ enum smi_forward_action { IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */ }; -enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, +enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch, int port_num, int phys_port_cnt); int smi_get_fwd_port(struct ib_smp *smp); extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp); extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, - u8 node_type, int port_num); + bool is_switch, int port_num); /* * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index ed6b6c85c334..0b84a9cdfe5b 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -870,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device, goto err_put; } - if (device->node_type == RDMA_NODE_IB_SWITCH) { + if (rdma_cap_ib_switch(device)) { ret = add_port(device, 0, port_callback); if (ret) goto err_put; diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 62c24b1452b8..009481073644 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp) return 0; } +static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES); static void ib_ucm_release_dev(struct device *dev) { struct ib_ucm_device *ucm_dev; @@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev) if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) clear_bit(ucm_dev->devnum, dev_map); else - clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map); + clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map); kfree(ucm_dev); } @@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); static dev_t overflow_maj; -static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES); static int find_overflow_devnum(void) { int ret; diff --git 
a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index ad45469f7582..29b21213ea75 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1354,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2) /* Acquire mutex's based on pointer comparison to prevent deadlock. */ if (file1 < file2) { mutex_lock(&file1->mut); - mutex_lock(&file2->mut); + mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING); } else { mutex_lock(&file2->mut); - mutex_lock(&file1->mut); + mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING); } } @@ -1616,6 +1616,7 @@ static void __exit ucma_cleanup(void) device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); misc_deregister(&ucma_misc); idr_destroy(&ctx_idr); + idr_destroy(&multicast_idr); } module_init(ucma_init); diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c index 12b5bc23832b..376b031c2c7f 100644 --- a/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/drivers/infiniband/hw/ehca/ehca_sqp.c @@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc) return IB_MAD_RESULT_FAILURE; diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index 948188e37f95..ad3a926ab3c5 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c @@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 48253b839a6f..30ba49c4a98c 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -2044,9 +2044,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd) spin_lock_init(&idev->qp_table.lock); spin_lock_init(&idev->lk_table.lock); - idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); + idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); /* Set the prefix to the default value (see ch. 
4.1.1) */ - idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL); + idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL); ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size); if (ret) diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 85a50df2f203..68b3dfa922bf 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct mlx4_ib_dev *dev = to_mdev(ibdev); const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; + enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num); - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; - switch (rdma_port_get_link_layer(ibdev, port_num)) { - case IB_LINK_LAYER_INFINIBAND: - if (!mlx4_is_slave(dev->dev)) - return ib_process_mad(ibdev, mad_flags, port_num, in_wc, - in_grh, in_mad, out_mad); - case IB_LINK_LAYER_ETHERNET: - return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, - in_grh, in_mad, out_mad); - default: - return -EINVAL; + /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA + * queries, should be called only by VFs and for that specific purpose + */ + if (link == IB_LINK_LAYER_INFINIBAND) { + if (mlx4_is_slave(dev->dev) && + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && + in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS) + return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, + in_grh, in_mad, out_mad); + + return ib_process_mad(ibdev, mad_flags, port_num, in_wc, + in_grh, in_mad, out_mad); } + + if (link == IB_LINK_LAYER_ETHERNET) + return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, + in_grh, in_mad, out_mad); + + return -EINVAL; } static void send_handler(struct ib_mad_agent *agent, diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 067a691ecbed..8be6db816460 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; props->timestamp_mask = 0xFFFFFFFFFFFFULL; - err = mlx4_get_internal_clock_params(dev->dev, &clock_params); - if (err) - goto out; + if (!mlx4_is_slave(dev->dev)) + err = mlx4_get_internal_clock_params(dev->dev, &clock_params); if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { - resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE; resp.response_length += sizeof(resp.hca_core_clock_offset); - resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; + if (!err && !mlx4_is_slave(dev->dev)) { + resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; + resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE; + } } if (uhw->outlen) { @@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); if (!dm) { pr_err("failed to allocate memory for tunneling qp update\n"); - goto out; + return; } for (i = 0; i < ports; i++) { dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); if (!dm[i]) { pr_err("failed to allocate memory for tunneling qp update work struct\n"); - for (i = 0; i < dev->caps.num_ports; i++) { - if (dm[i]) - kfree(dm[i]); - } + while (--i >= 0) + 
kfree(dm[i]); goto out; } - } - /* initialize or tear down tunnel QPs for the slave */ - for (i = 0; i < ports; i++) { INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); dm[i]->port = first_port + i + 1; dm[i]->slave = slave; dm[i]->do_init = do_init; dm[i]->dev = ibdev; - spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); - if (!ibdev->sriov.is_going_down) + } + /* initialize or tear down tunnel QPs for the slave */ + spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); + if (!ibdev->sriov.is_going_down) { + for (i = 0; i < ports; i++) queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); + } else { + spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); + for (i = 0; i < ports; i++) + kfree(dm[i]); } out: kfree(dm); diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 01fc97db45d6..b84d13a487cc 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 6b2418b74c99..7c3f2fb44ba5 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; /* Forward locally generated traps to the SM */ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 9047af429906..8a3ad170d790 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi int rc = arpindex; struct net_device *netdev; struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; + __be32 dst_ipaddr = htonl(dst_ip); - rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); + rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0); if (IS_ERR(rt)) { printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", __func__, dst_ip); @@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi else netdev = nesvnic->netdev; - neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev); + neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr); rcu_read_lock(); if (neigh) { diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 02120d340d50..4713dd7ed764 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr, (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]); 
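/*
 * A standalone sketch of the unwind pattern the mlx4 do_slave_init() hunk
 * above adopts: when the i-th allocation fails, walk back with
 * while (--i >= 0) and free only the entries that were actually allocated,
 * instead of re-scanning the whole array with the same index variable.
 */
#include <stdlib.h>

static void **alloc_all_or_nothing(int n, size_t size)
{
        void **items = calloc((size_t)n, sizeof(*items));
        int i;

        if (!items)
                return NULL;

        for (i = 0; i < n; i++) {
                items[i] = malloc(size);
                if (!items[i]) {
                        while (--i >= 0)        /* unwind what was allocated */
                                free(items[i]);
                        free(items);
                        return NULL;
                }
        }
        return items;
}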
cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32( - (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]); + (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]); } else { cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0; cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 4bafa15708d0..29b27675dd70 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -215,8 +215,9 @@ int ocrdma_process_mad(struct ib_device *ibdev, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_PERF_MGMT: diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 8a1398b253a2..d98a707a5eb9 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -696,6 +696,7 @@ static void __exit ocrdma_exit_module(void) ocrdma_unregister_inet6addr_notifier(); ocrdma_unregister_inetaddr_notifier(); ocrdma_rem_debugfs(); + idr_destroy(&ocrdma_dev_id); } module_init(ocrdma_init_module); diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 05e3242d8442..9625e7c438e5 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; - BUG_ON(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad)); + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index bd94b0a6e9e5..79859c4d43c9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -239,7 +239,7 @@ struct ipoib_cm_tx { struct net_device *dev; struct ipoib_neigh *neigh; struct ipoib_path *path; - struct ipoib_cm_tx_buf *tx_ring; + struct ipoib_tx_buf *tx_ring; unsigned tx_head; unsigned tx_tail; unsigned long flags; @@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev); void ipoib_mcast_dev_down(struct net_device *dev); void ipoib_mcast_dev_flush(struct net_device *dev); +int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req); +void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req); + +static inline void ipoib_build_sge(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req) +{ + int i, off; + struct sk_buff *skb = tx_req->skb; + skb_frag_t *frags = skb_shinfo(skb)->frags; + int nr_frags = skb_shinfo(skb)->nr_frags; + u64 *mapping = tx_req->mapping; + + if (skb_headlen(skb)) { + priv->tx_sge[0].addr = mapping[0]; + priv->tx_sge[0].length = skb_headlen(skb); + off = 1; + } else + off = 0; + + for (i = 0; i < nr_frags; ++i) { + priv->tx_sge[i + off].addr = mapping[i + off]; + priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); + } + priv->tx_wr.num_sge = nr_frags + off; +} + #ifdef 
CONFIG_INFINIBAND_IPOIB_DEBUG struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev); int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index cf32a778e7d0..ee39be6ccfb0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -694,14 +694,12 @@ repost: static inline int post_send(struct ipoib_dev_priv *priv, struct ipoib_cm_tx *tx, unsigned int wr_id, - u64 addr, int len) + struct ipoib_tx_buf *tx_req) { struct ib_send_wr *bad_wr; - priv->tx_sge[0].addr = addr; - priv->tx_sge[0].length = len; + ipoib_build_sge(priv, tx_req); - priv->tx_wr.num_sge = 1; priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); @@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv, void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) { struct ipoib_dev_priv *priv = netdev_priv(dev); - struct ipoib_cm_tx_buf *tx_req; - u64 addr; + struct ipoib_tx_buf *tx_req; int rc; if (unlikely(skb->len > tx->mtu)) { @@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ */ tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; tx_req->skb = skb; - addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); - if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { + + if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } - tx_req->mapping = addr; - skb_orphan(skb); skb_dst_drop(skb); - rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), - addr, skb->len); + rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req); if (unlikely(rc)) { ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; - ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(skb); } else { dev->trans_start = jiffies; @@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_cm_tx *tx = wc->qp->qp_context; unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM; - struct ipoib_cm_tx_buf *tx_req; + struct ipoib_tx_buf *tx_req; unsigned long flags; ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", @@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) tx_req = &tx->tx_ring[wr_id]; - ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv, tx_req); /* FIXME: is this right? Shouldn't we only increment on success? 
*/ ++dev->stats.tx_packets; @@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_ struct ib_qp *tx_qp; + if (dev->features & NETIF_F_SG) + attr.cap.max_send_sge = MAX_SKB_FRAGS + 1; + tx_qp = ib_create_qp(priv->pd, &attr); if (PTR_ERR(tx_qp) == -EINVAL) { ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", @@ -1170,7 +1167,7 @@ err_tx: static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) { struct ipoib_dev_priv *priv = netdev_priv(p->dev); - struct ipoib_cm_tx_buf *tx_req; + struct ipoib_tx_buf *tx_req; unsigned long begin; ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", @@ -1197,8 +1194,7 @@ timeout: while ((int) p->tx_tail - (int) p->tx_head < 0) { tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; - ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, - DMA_TO_DEVICE); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++p->tx_tail; netif_tx_lock_bh(p->dev); @@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work) spin_unlock_irq(&priv->lock); } - static ssize_t show_mode(struct device *d, struct device_attribute *attr, char *buf) { diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 63b92cbb29ad..d266667ca9b8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -263,8 +263,7 @@ repost: "for buf %d\n", wr_id); } -static int ipoib_dma_map_tx(struct ib_device *ca, - struct ipoib_tx_buf *tx_req) +int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; @@ -305,8 +304,8 @@ partial_error: return -EIO; } -static void ipoib_dma_unmap_tx(struct ib_device *ca, - struct ipoib_tx_buf *tx_req) +void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, + struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; @@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca, int off; if (skb_headlen(skb)) { - ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); + ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb), + DMA_TO_DEVICE); off = 1; } else off = 0; @@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca, for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag), - DMA_TO_DEVICE); + ib_dma_unmap_page(priv->ca, mapping[i + off], + skb_frag_size(frag), DMA_TO_DEVICE); } } @@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) tx_req = &priv->tx_ring[wr_id]; - ipoib_dma_unmap_tx(priv->ca, tx_req); + ipoib_dma_unmap_tx(priv, tx_req); ++dev->stats.tx_packets; dev->stats.tx_bytes += tx_req->skb->len; @@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv, void *head, int hlen) { struct ib_send_wr *bad_wr; - int i, off; struct sk_buff *skb = tx_req->skb; - skb_frag_t *frags = skb_shinfo(skb)->frags; - int nr_frags = skb_shinfo(skb)->nr_frags; - u64 *mapping = tx_req->mapping; - if (skb_headlen(skb)) { - priv->tx_sge[0].addr = mapping[0]; - priv->tx_sge[0].length = skb_headlen(skb); - off = 1; - } else - off = 0; + ipoib_build_sge(priv, tx_req); - for (i = 0; i < nr_frags; ++i) { - priv->tx_sge[i + off].addr = mapping[i + off]; - priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); - } - priv->tx_wr.num_sge 
= nr_frags + off; priv->tx_wr.wr_id = wr_id; priv->tx_wr.wr.ud.remote_qpn = qpn; priv->tx_wr.wr.ud.ah = address; @@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; --priv->tx_outstanding; - ipoib_dma_unmap_tx(priv->ca, tx_req); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(skb); if (netif_queue_stopped(dev)) netif_wake_queue(dev); @@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev) while ((int) priv->tx_tail - (int) priv->tx_head < 0) { tx_req = &priv->tx_ring[priv->tx_tail & (ipoib_sendq_size - 1)]; - ipoib_dma_unmap_tx(priv->ca, tx_req); + ipoib_dma_unmap_tx(priv, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; --priv->tx_outstanding; @@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv) } static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, - enum ipoib_flush_level level) + enum ipoib_flush_level level, + int nesting) { struct ipoib_dev_priv *cpriv; struct net_device *dev = priv->dev; int result; - down_read(&priv->vlan_rwsem); + down_read_nested(&priv->vlan_rwsem, nesting); /* * Flush any child interfaces too -- they might be up even if * the parent is down. */ list_for_each_entry(cpriv, &priv->child_intfs, list) - __ipoib_ib_dev_flush(cpriv, level); + __ipoib_ib_dev_flush(cpriv, level, nesting + 1); up_read(&priv->vlan_rwsem); @@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_light); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0); } void ipoib_ib_dev_flush_normal(struct work_struct *work) @@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_normal); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0); } void ipoib_ib_dev_flush_heavy(struct work_struct *work) @@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_heavy); - __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY); + __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0); } void ipoib_ib_dev_cleanup(struct net_device *dev) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index da149c278cb8..b2943c84a5dd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -190,7 +190,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu struct ipoib_dev_priv *priv = netdev_priv(dev); if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) - features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); + features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); return features; } @@ -232,6 +232,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) ipoib_warn(priv, "enabling connected mode " "will cause multicast packet drops\n"); netdev_update_features(dev); + dev_set_mtu(dev, ipoib_cm_max_mtu(dev)); rtnl_unlock(); priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; @@ -1577,7 +1578,8 @@ static struct net_device *ipoib_add_port(const char *format, SET_NETDEV_DEV(priv->dev, hca->dma_device); priv->dev->dev_id = port - 1; - if (!ib_query_port(hca, port, &attr)) + result = ib_query_port(hca, port, &attr); + if (!result) priv->max_ib_mtu = 
ib_mtu_enum_to_int(attr.max_mtu); else { printk(KERN_WARNING "%s: ib_query_port %d failed\n", @@ -1598,7 +1600,8 @@ static struct net_device *ipoib_add_port(const char *format, goto device_init_failed; } - if (ipoib_set_dev_features(priv, hca)) + result = ipoib_set_dev_features(priv, hca); + if (result) goto device_init_failed; /* @@ -1684,7 +1687,7 @@ static void ipoib_add_one(struct ib_device *device) struct list_head *dev_list; struct net_device *dev; struct ipoib_dev_priv *priv; - int s, e, p; + int p; int count = 0; dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); @@ -1693,15 +1696,7 @@ static void ipoib_add_one(struct ib_device *device) INIT_LIST_HEAD(dev_list); - if (device->node_type == RDMA_NODE_IB_SWITCH) { - s = 0; - e = 0; - } else { - s = 1; - e = device->phys_port_cnt; - } - - for (p = s; p <= e; ++p) { + for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { if (!rdma_protocol_ib(device, p)) continue; dev = ipoib_add_port("ib%d", device, p); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 267dc4f75502..31a20b462266 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp) { int tmo, res; - if (strncmp(val, "off", 3) != 0) { - res = kstrtoint(val, 0, &tmo); - if (res) - goto out; - } else { - tmo = -1; - } + res = srp_parse_tmo(&tmo, val); + if (res) + goto out; + if (kp->arg == &srp_reconnect_delay) res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, srp_dev_loss_tmo); @@ -3379,7 +3376,7 @@ static void srp_add_one(struct ib_device *device) struct srp_device *srp_dev; struct ib_device_attr *dev_attr; struct srp_host *host; - int mr_page_shift, s, e, p; + int mr_page_shift, p; u64 max_pages_per_mr; dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); @@ -3443,15 +3440,7 @@ static void srp_add_one(struct ib_device *device) if (IS_ERR(srp_dev->mr)) goto err_pd; - if (device->node_type == RDMA_NODE_IB_SWITCH) { - s = 0; - e = 0; - } else { - s = 1; - e = device->phys_port_cnt; - } - - for (p = s; p <= e; ++p) { + for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { host = srp_add_port(srp_dev, p); if (host) list_add_tail(&host->list, &srp_dev->dev_list); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 82897ca17f32..60ff0a2390e5 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -302,7 +302,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad) int i; ioui = (struct ib_dm_iou_info *)mad->data; - ioui->change_id = __constant_cpu_to_be16(1); + ioui->change_id = cpu_to_be16(1); ioui->max_controllers = 16; /* set present for slot 1 and empty for the rest */ @@ -330,13 +330,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot, if (!slot || slot > 16) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); return; } if (slot > 2) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); + = cpu_to_be16(DM_MAD_STATUS_NO_IOC); return; } @@ -348,10 +348,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot, iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver); iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id); iocp->subsys_device_id = 0x0; - iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS); - iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS); - iocp->protocol = 
__constant_cpu_to_be16(SRP_PROTOCOL); - iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION); + iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS); + iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS); + iocp->protocol = cpu_to_be16(SRP_PROTOCOL); + iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION); iocp->send_queue_depth = cpu_to_be16(sdev->srq_size); iocp->rdma_read_depth = 4; iocp->send_size = cpu_to_be32(srp_max_req_size); @@ -379,13 +379,13 @@ static void srpt_get_svc_entries(u64 ioc_guid, if (!slot || slot > 16) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); return; } if (slot > 2 || lo > hi || hi > 1) { mad->mad_hdr.status - = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); + = cpu_to_be16(DM_MAD_STATUS_NO_IOC); return; } @@ -436,7 +436,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad, break; default: rsp_mad->mad_hdr.status = - __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); break; } } @@ -493,11 +493,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent, break; case IB_MGMT_METHOD_SET: dm_mad->mad_hdr.status = - __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); break; default: dm_mad->mad_hdr.status = - __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); + cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); break; } @@ -1535,7 +1535,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, memset(srp_rsp, 0, sizeof *srp_rsp); srp_rsp->opcode = SRP_RSP; srp_rsp->req_lim_delta = - __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); + cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); srp_rsp->tag = tag; srp_rsp->status = status; @@ -1585,8 +1585,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, memset(srp_rsp, 0, sizeof *srp_rsp); srp_rsp->opcode = SRP_RSP; - srp_rsp->req_lim_delta = __constant_cpu_to_be32(1 - + atomic_xchg(&ch->req_lim_delta, 0)); + srp_rsp->req_lim_delta = + cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); srp_rsp->tag = tag; srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; @@ -1630,7 +1630,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len) switch (len) { case 8: if ((*((__be64 *)lun) & - __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) + cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) goto out_err; break; case 4: @@ -2449,8 +2449,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, } if (it_iu_len > srp_max_req_size || it_iu_len < 64) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); ret = -EINVAL; pr_err("rejected SRP_LOGIN_REQ because its" " length (%d bytes) is out of range (%d .. 
%d)\n", @@ -2459,8 +2459,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, } if (!sport->enabled) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); ret = -EINVAL; pr_err("rejected SRP_LOGIN_REQ because the target port" " has not yet been enabled\n"); @@ -2505,8 +2505,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) || *(__be64 *)(req->target_port_id + 8) != cpu_to_be64(srpt_service_guid)) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); ret = -ENOMEM; pr_err("rejected SRP_LOGIN_REQ because it" " has an invalid target port identifier.\n"); @@ -2515,8 +2515,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ch = kzalloc(sizeof *ch, GFP_KERNEL); if (!ch) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_err("rejected SRP_LOGIN_REQ because no memory.\n"); ret = -ENOMEM; goto reject; @@ -2552,8 +2552,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ret = srpt_create_ch_ib(ch); if (ret) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_err("rejected SRP_LOGIN_REQ because creating" " a new RDMA channel failed.\n"); goto free_ring; @@ -2561,8 +2561,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, ret = srpt_ch_qp_rtr(ch, ch->qp); if (ret) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_err("rejected SRP_LOGIN_REQ because enabling" " RTR failed (error code = %d)\n", ret); goto destroy_ib; @@ -2580,15 +2579,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (!nacl) { pr_info("Rejected login because no ACL has been" " configured yet for initiator %s.\n", ch->sess_name); - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); goto destroy_ib; } ch->sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(ch->sess)) { - rej->reason = __constant_cpu_to_be32( - SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + rej->reason = cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); pr_debug("Failed to create session\n"); goto deregister_session; } @@ -2604,8 +2603,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, rsp->max_it_iu_len = req->req_it_iu_len; rsp->max_ti_iu_len = req->req_it_iu_len; ch->max_ti_iu_len = it_iu_len; - rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT - | SRP_BUF_FORMAT_INDIRECT); + rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT + | SRP_BUF_FORMAT_INDIRECT); rsp->req_lim_delta = cpu_to_be32(ch->rq_size); atomic_set(&ch->req_lim, ch->rq_size); atomic_set(&ch->req_lim_delta, 0); @@ -2655,8 +2654,8 @@ free_ch: reject: rej->opcode = SRP_LOGIN_REJ; rej->tag = req->tag; - rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT - | SRP_BUF_FORMAT_INDIRECT); + rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT + | SRP_BUF_FORMAT_INDIRECT); ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, (void *)rej, sizeof *rej); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 1b7e155869f6..c00e2db351ba 100644 --- 
a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -75,6 +75,13 @@ struct its_node { #define ITS_ITT_ALIGN SZ_256 +struct event_lpi_map { + unsigned long *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; +}; + /* * The ITS view of a device - belongs to an ITS, a collection, owns an * interrupt translation table, and a list of interrupts. @@ -82,11 +89,8 @@ struct its_node { struct its_device { struct list_head entry; struct its_node *its; - struct its_collection *collection; + struct event_lpi_map event_map; void *itt; - unsigned long *lpi_map; - irq_hw_number_t lpi_base; - int nr_lpis; u32 nr_ites; u32 device_id; }; @@ -99,6 +103,14 @@ static struct rdists *gic_rdists; #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +static struct its_collection *dev_event_to_col(struct its_device *its_dev, + u32 event) +{ + struct its_node *its = its_dev->its; + + return its->collections + its_dev->event_map.col_map[event]; +} + /* * ITS command descriptors - parameters to be encoded in a command * block. @@ -134,7 +146,7 @@ struct its_cmd_desc { struct { struct its_device *dev; struct its_collection *col; - u32 id; + u32 event_id; } its_movi_cmd; struct { @@ -241,7 +253,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, its_fixup_cmd(cmd); - return desc->its_mapd_cmd.dev->collection; + return NULL; } static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, @@ -260,52 +272,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapvi_cmd.dev, + desc->its_mapvi_cmd.event_id); + its_encode_cmd(cmd, GITS_CMD_MAPVI); its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); - its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id); + its_encode_collection(cmd, col->col_id); its_fixup_cmd(cmd); - return desc->its_mapvi_cmd.dev->collection; + return col; } static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + its_encode_cmd(cmd, GITS_CMD_MOVI); its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); - its_encode_event_id(cmd, desc->its_movi_cmd.id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); its_fixup_cmd(cmd); - return desc->its_movi_cmd.dev->collection; + return col; } static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + its_encode_cmd(cmd, GITS_CMD_DISCARD); its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); its_encode_event_id(cmd, desc->its_discard_cmd.event_id); its_fixup_cmd(cmd); - return desc->its_discard_cmd.dev->collection; + return col; } static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + struct its_collection *col; + + col = dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + its_encode_cmd(cmd, GITS_CMD_INV); 
its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); its_encode_event_id(cmd, desc->its_inv_cmd.event_id); its_fixup_cmd(cmd); - return desc->its_inv_cmd.dev->collection; + return col; } static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, @@ -497,7 +529,7 @@ static void its_send_movi(struct its_device *dev, desc.its_movi_cmd.dev = dev; desc.its_movi_cmd.col = col; - desc.its_movi_cmd.id = id; + desc.its_movi_cmd.event_id = id; its_send_single_command(dev->its, its_build_movi_cmd, &desc); } @@ -528,7 +560,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col) static inline u32 its_get_event_id(struct irq_data *d) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); - return d->hwirq - its_dev->lpi_base; + return d->hwirq - its_dev->event_map.lpi_base; } static void lpi_set_config(struct irq_data *d, bool enable) @@ -583,7 +615,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, target_col = &its_dev->its->collections[cpu]; its_send_movi(its_dev, target_col, id); - its_dev->collection = target_col; + its_dev->event_map.col_map[id] = cpu; return IRQ_SET_MASK_OK_DONE; } @@ -713,8 +745,10 @@ out: return bitmap; } -static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids) +static void its_lpi_free(struct event_lpi_map *map) { + int base = map->lpi_base; + int nr_ids = map->nr_lpis; int lpi; spin_lock(&lpi_lock); @@ -731,7 +765,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids) spin_unlock(&lpi_lock); - kfree(bitmap); + kfree(map->lpi_map); + kfree(map->col_map); } /* @@ -1099,11 +1134,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, struct its_device *dev; unsigned long *lpi_map; unsigned long flags; + u16 *col_map = NULL; void *itt; int lpi_base; int nr_lpis; int nr_ites; - int cpu; int sz; dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -1117,20 +1152,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kzalloc(sz, GFP_KERNEL); lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL); - if (!dev || !itt || !lpi_map) { + if (!dev || !itt || !lpi_map || !col_map) { kfree(dev); kfree(itt); kfree(lpi_map); + kfree(col_map); return NULL; } dev->its = its; dev->itt = itt; dev->nr_ites = nr_ites; - dev->lpi_map = lpi_map; - dev->lpi_base = lpi_base; - dev->nr_lpis = nr_lpis; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; dev->device_id = dev_id; INIT_LIST_HEAD(&dev->entry); @@ -1138,10 +1177,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, list_add(&dev->entry, &its->its_device_list); raw_spin_unlock_irqrestore(&its->lock, flags); - /* Bind the device to the first possible CPU */ - cpu = cpumask_first(cpu_online_mask); - dev->collection = &its->collections[cpu]; - /* Map device to its ITT */ its_send_mapd(dev, 1); @@ -1163,12 +1198,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) { int idx; - idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis); - if (idx == dev->nr_lpis) + idx = find_first_zero_bit(dev->event_map.lpi_map, + dev->event_map.nr_lpis); + if (idx == dev->event_map.nr_lpis) return -ENOSPC; - *hwirq = dev->lpi_base + idx; - set_bit(idx, dev->lpi_map); + *hwirq = dev->event_map.lpi_base + idx; + 
set_bit(idx, dev->event_map.lpi_map); return 0; } @@ -1288,7 +1324,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq, &its_irq_chip, its_dev); dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n", - (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i); + (int)(hwirq - its_dev->event_map.lpi_base), + (int)hwirq, virq + i); } return 0; @@ -1300,6 +1337,9 @@ static void its_irq_domain_activate(struct irq_domain *domain, struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + /* Bind the LPI to the first possible CPU */ + its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); + /* Map the GIC IRQ and event to the device */ its_send_mapvi(its_dev, d->hwirq, event); } @@ -1327,17 +1367,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, u32 event = its_get_event_id(data); /* Mark interrupt index as unused */ - clear_bit(event, its_dev->lpi_map); + clear_bit(event, its_dev->event_map.lpi_map); /* Nuke the entry in the domain */ irq_domain_reset_irq_data(data); } /* If all interrupts have been freed, start mopping the floor */ - if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) { - its_lpi_free(its_dev->lpi_map, - its_dev->lpi_base, - its_dev->nr_lpis); + if (bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis)) { + its_lpi_free(&its_dev->event_map); /* Unmap device/itt */ its_send_mapd(its_dev, 0); diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index a45121546caf..acb721b31bcf 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -2,7 +2,7 @@ * SPEAr platform shared irq layer source file * * Copyright (C) 2009-2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * Copyright (C) 2012 ST Microelectronics * Shiraz Hashim <shiraz.linux.kernel@gmail.com> diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 8c91fd5eb6fd..375be509e95f 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty) cs->hw.ser->tty = tty; atomic_set(&cs->hw.ser->refcnt, 1); init_completion(&cs->hw.ser->dead_cmp); - tty->disc_data = cs; + /* Set the amount of data we're willing to receive per call + * from the hardware driver to half of the input buffer size + * to leave some reserve. + * Note: We don't do flow control towards the hardware driver. + * If more data is received than will fit into the input buffer, + * it will be dropped and an error will be logged. This should + * never happen as the device is slow and the buffer size ample. + */ + tty->receive_room = RBUFSIZE/2; + /* OK.. Initialization of the datastructures and the HW is done.. Now * startup system and notify the LL that we are ready to run */ @@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty) } /* - * Read on the tty. - * Unused, received data goes only to the Gigaset driver. - */ -static ssize_t -gigaset_tty_read(struct tty_struct *tty, struct file *file, - unsigned char __user *buf, size_t count) -{ - return -EAGAIN; -} - -/* - * Write on the tty. - * Unused, transmit data comes only from the Gigaset driver. - */ -static ssize_t -gigaset_tty_write(struct tty_struct *tty, struct file *file, - const unsigned char *buf, size_t count) -{ - return -EAGAIN; -} - -/* * Ioctl on the tty. 
* Called in process context only. * May be re-entered by multiple ioctl calling threads. @@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = { .open = gigaset_tty_open, .close = gigaset_tty_close, .hangup = gigaset_tty_hangup, - .read = gigaset_tty_read, - .write = gigaset_tty_write, .ioctl = gigaset_tty_ioctl, .receive_buf = gigaset_tty_receive, .write_wakeup = gigaset_tty_wakeup, diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index a08e3eeac3c5..79a6d63e8ed3 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list) do { \ set_closure_fn(_cl, _fn, _wq); \ closure_sub(_cl, CLOSURE_RUNNING + 1); \ - return; \ } while (0) /** @@ -349,7 +348,6 @@ do { \ do { \ set_closure_fn(_cl, _fn, _wq); \ closure_queue(_cl); \ - return; \ } while (0) /** @@ -365,7 +363,6 @@ do { \ do { \ set_closure_fn(_cl, _destructor, NULL); \ closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \ - return; \ } while (0) /** diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index cb64e64a4789..bf6a9ca18403 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -105,6 +105,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) } while (n != bio); continue_at(&s->cl, bch_bio_submit_split_done, NULL); + return; submit: generic_make_request(bio); } diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index ce64fc851251..418607a6ba33 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl) if (!w->need_write) { closure_return_with_destructor(cl, journal_write_unlock); + return; } else if (journal_full(&c->journal)) { journal_reclaim(c); spin_unlock(&c->journal.lock); btree_flush_write(c); continue_at(cl, journal_write, system_wq); + return; } c->journal.blocks_free -= set_blocks(w->data, block_bytes(c)); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 4afb2d26b148..f292790997d7 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -88,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl) if (journal_ref) atomic_dec_bug(journal_ref); - if (!op->insert_data_done) + if (!op->insert_data_done) { continue_at(cl, bch_data_insert_start, op->wq); + return; + } bch_keylist_free(&op->insert_keys); closure_return(cl); @@ -216,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl) /* 1 for the device pointer and 1 for the chksum */ if (bch_keylist_realloc(&op->insert_keys, 3 + (op->csum ? 
1 : 0), - op->c)) + op->c)) { continue_at(cl, bch_data_insert_keys, op->wq); + return; + } k = op->insert_keys.top; bkey_init(k); @@ -255,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl) op->insert_data_done = true; continue_at(cl, bch_data_insert_keys, op->wq); + return; err: /* bch_alloc_sectors() blocks if s->writeback = true */ BUG_ON(op->writeback); @@ -576,8 +581,10 @@ static void cache_lookup(struct closure *cl) ret = bch_btree_map_keys(&s->op, s->iop.c, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), cache_lookup_fn, MAP_END_KEY); - if (ret == -EAGAIN) + if (ret == -EAGAIN) { continue_at(cl, cache_lookup, bcache_wq); + return; + } closure_return(cl); } @@ -1085,6 +1092,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) continue_at_nobarrier(&s->cl, flash_dev_nodata, bcache_wq); + return; } else if (rw) { bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &KEY(d->id, bio->bi_iter.bi_sector, 0), diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1b4e1756b169..b680da5d7b93 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -424,7 +424,6 @@ static void free_migration(struct dm_cache_migration *mg) wake_up(&cache->migration_wait); mempool_free(mg, cache->migration_pool); - wake_worker(cache); } static int prealloc_data_structs(struct cache *cache, struct prealloc *p) @@ -1947,6 +1946,7 @@ static int commit_if_needed(struct cache *cache) static void process_deferred_bios(struct cache *cache) { + bool prealloc_used = false; unsigned long flags; struct bio_list bios; struct bio *bio; @@ -1981,13 +1981,16 @@ static void process_deferred_bios(struct cache *cache) process_discard_bio(cache, &structs, bio); else process_bio(cache, &structs, bio); + prealloc_used = true; } - prealloc_free_structs(cache, &structs); + if (prealloc_used) + prealloc_free_structs(cache, &structs); } static void process_deferred_cells(struct cache *cache) { + bool prealloc_used = false; unsigned long flags; struct dm_bio_prison_cell *cell, *tmp; struct list_head cells; @@ -2015,9 +2018,11 @@ static void process_deferred_cells(struct cache *cache) } process_cell(cache, &structs, cell); + prealloc_used = true; } - prealloc_free_structs(cache, &structs); + if (prealloc_used) + prealloc_free_structs(cache, &structs); } static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) @@ -2062,7 +2067,7 @@ static void process_deferred_writethrough_bios(struct cache *cache) static void writeback_some_dirty_blocks(struct cache *cache) { - int r = 0; + bool prealloc_used = false; dm_oblock_t oblock; dm_cblock_t cblock; struct prealloc structs; @@ -2072,23 +2077,21 @@ static void writeback_some_dirty_blocks(struct cache *cache) memset(&structs, 0, sizeof(structs)); while (spare_migration_bandwidth(cache)) { - if (prealloc_data_structs(cache, &structs)) - break; + if (policy_writeback_work(cache->policy, &oblock, &cblock, busy)) + break; /* no work to do */ - r = policy_writeback_work(cache->policy, &oblock, &cblock, busy); - if (r) - break; - - r = get_cell(cache, oblock, &structs, &old_ocell); - if (r) { + if (prealloc_data_structs(cache, &structs) || + get_cell(cache, oblock, &structs, &old_ocell)) { policy_set_dirty(cache->policy, oblock); break; } writeback(cache, &structs, oblock, cblock, old_ocell); + prealloc_used = true; } - prealloc_free_structs(cache, &structs); + if (prealloc_used) + prealloc_free_structs(cache, &structs); } /*---------------------------------------------------------------- @@ 
-3496,7 +3499,7 @@ static void cache_resume(struct dm_target *ti) * <#demotions> <#promotions> <#dirty> * <#features> <features>* * <#core args> <core args> - * <policy name> <#policy args> <policy args>* <cache metadata mode> + * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check> */ static void cache_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) @@ -3582,6 +3585,11 @@ static void cache_status(struct dm_target *ti, status_type_t type, else DMEMIT("rw "); + if (dm_cache_metadata_needs_check(cache->cmd)) + DMEMIT("needs_check "); + else + DMEMIT("- "); + break; case STATUSTYPE_TABLE: @@ -3820,7 +3828,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 7, 0}, + .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index c33f61a4cc28..1c50c580215c 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include <linux/sort.h> #include <linux/rbtree.h> @@ -268,7 +269,7 @@ struct pool { process_mapping_fn process_prepared_mapping; process_mapping_fn process_prepared_discard; - struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE]; + struct dm_bio_prison_cell **cell_sort_array; }; static enum pool_mode get_pool_mode(struct pool *pool); @@ -2281,18 +2282,23 @@ static void do_waker(struct work_struct *ws) queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); } +static void notify_of_pool_mode_change_to_oods(struct pool *pool); + /* * We're holding onto IO to allow userland time to react. After the * timeout either the pool will have been resized (and thus back in - * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. + * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space. */ static void do_no_space_timeout(struct work_struct *ws) { struct pool *pool = container_of(to_delayed_work(ws), struct pool, no_space_timeout); - if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) - set_pool_mode(pool, PM_READ_ONLY); + if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { + pool->pf.error_if_no_space = true; + notify_of_pool_mode_change_to_oods(pool); + error_retry_list(pool); + } } /*----------------------------------------------------------------*/ @@ -2370,6 +2376,14 @@ static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) dm_device_name(pool->pool_md), new_mode); } +static void notify_of_pool_mode_change_to_oods(struct pool *pool) +{ + if (!pool->pf.error_if_no_space) + notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)"); + else + notify_of_pool_mode_change(pool, "out-of-data-space (error IO)"); +} + static bool passdown_enabled(struct pool_c *pt) { return pt->adjusted_pf.discard_passdown; @@ -2454,7 +2468,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) * frequently seeing this mode. 
*/ if (old_mode != new_mode) - notify_of_pool_mode_change(pool, "out-of-data-space"); + notify_of_pool_mode_change_to_oods(pool); pool->process_bio = process_bio_read_only; pool->process_discard = process_discard_bio; pool->process_cell = process_cell_read_only; @@ -2777,6 +2791,7 @@ static void __pool_destroy(struct pool *pool) { __pool_table_remove(pool); + vfree(pool->cell_sort_array); if (dm_pool_metadata_close(pool->pmd) < 0) DMWARN("%s: dm_pool_metadata_close() failed.", __func__); @@ -2889,6 +2904,13 @@ static struct pool *pool_create(struct mapped_device *pool_md, goto bad_mapping_pool; } + pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE); + if (!pool->cell_sort_array) { + *error = "Error allocating cell sort array"; + err_p = ERR_PTR(-ENOMEM); + goto bad_sort_array; + } + pool->ref_count = 1; pool->last_commit_jiffies = jiffies; pool->pool_md = pool_md; @@ -2897,6 +2919,8 @@ static struct pool *pool_create(struct mapped_device *pool_md, return pool; +bad_sort_array: + mempool_destroy(pool->mapping_pool); bad_mapping_pool: dm_deferred_set_destroy(pool->all_io_ds); bad_all_io_ds: @@ -3714,6 +3738,7 @@ static void emit_flags(struct pool_features *pf, char *result, * Status line is: * <transaction id> <used metadata sectors>/<total metadata sectors> * <used data sectors>/<total data sectors> <held metadata root> + * <pool mode> <discard config> <no space config> <needs_check> */ static void pool_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) @@ -3815,6 +3840,11 @@ static void pool_status(struct dm_target *ti, status_type_t type, else DMEMIT("queue_if_no_space "); + if (dm_pool_metadata_needs_check(pool->pmd)) + DMEMIT("needs_check "); + else + DMEMIT("- "); + break; case STATUSTYPE_TABLE: @@ -3918,7 +3948,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 15, 0}, + .version = {1, 16, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -4305,7 +4335,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type thin_target = { .name = "thin", - .version = {1, 15, 0}, + .version = {1, 16, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f331d888e7f5..ab37ae114e94 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1067,13 +1067,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) */ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) { - int nr_requests_pending; - atomic_dec(&md->pending[rw]); /* nudge anyone waiting on suspend queue */ - nr_requests_pending = md_in_flight(md); - if (!nr_requests_pending) + if (!md_in_flight(md)) wake_up(&md->wait); /* @@ -1085,8 +1082,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) if (run_queue) { if (md->queue->mq_ops) blk_mq_run_hw_queues(md->queue, true); - else if (!nr_requests_pending || - (nr_requests_pending >= md->queue->nr_congestion_on)) + else blk_run_queue_async(md->queue); } @@ -2281,8 +2277,6 @@ static void dm_init_old_md_queue(struct mapped_device *md) static void cleanup_mapped_device(struct mapped_device *md) { - cleanup_srcu_struct(&md->io_barrier); - if (md->wq) destroy_workqueue(md->wq); if (md->kworker_task) @@ -2294,6 +2288,8 @@ static void cleanup_mapped_device(struct mapped_device *md) if (md->bs) bioset_free(md->bs); 
+ cleanup_srcu_struct(&md->io_barrier); + if (md->disk) { spin_lock(&_minor_lock); md->disk->private_data = NULL; diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index e04cfd2d60ef..9836c0ae897c 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, if (s < 0 && nr_center < -s) { /* not enough in central node */ - shift(left, center, nr_center); - s = nr_center - target; + shift(left, center, -nr_center); + s += nr_center; shift(left, right, s); nr_right += s; } else @@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, if (s > 0 && nr_center < s) { /* not enough in central node */ shift(center, right, nr_center); - s = target - nr_center; + s -= nr_center; shift(left, right, s); nr_left -= s; } else diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 200ac12a1d40..fdd3793e22f9 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root) int r; struct del_stack *s; - s = kmalloc(sizeof(*s), GFP_KERNEL); + s = kmalloc(sizeof(*s), GFP_NOIO); if (!s) return -ENOMEM; s->info = info; diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c index 5c054031c3f8..e14c8c9d189b 100644 --- a/drivers/mfd/stmpe-i2c.c +++ b/drivers/mfd/stmpe-i2c.c @@ -6,7 +6,7 @@ * * License Terms: GNU General Public License, version 2 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson - * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics + * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics */ #include <linux/i2c.h> diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c index a81badbaa917..6fdb30e84a2b 100644 --- a/drivers/mfd/stmpe-spi.c +++ b/drivers/mfd/stmpe-spi.c @@ -4,7 +4,7 @@ * Copyright (C) ST Microelectronics SA 2011 * * License Terms: GNU General Public License, version 2 - * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics + * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics */ #include <linux/spi/spi.h> @@ -146,4 +146,4 @@ module_exit(stmpe_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver"); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index df088343d60f..255a896769b8 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -4,7 +4,7 @@ * Support of SDHCI platform devices for spear soc family * * Copyright (C) 2010 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * Inspired by sdhci-pltfm.c * @@ -211,5 +211,5 @@ static struct platform_driver sdhci_driver = { module_platform_driver(sdhci_driver); MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1c6a773c87ea..0c627b4733ca 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -625,6 +625,23 @@ static void 
bond_set_dev_addr(struct net_device *bond_dev, call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); } +static struct slave *bond_get_old_active(struct bonding *bond, + struct slave *new_active) +{ + struct slave *slave; + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) { + if (slave == new_active) + continue; + + if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) + return slave; + } + + return NULL; +} + /* bond_do_fail_over_mac * * Perform special MAC address swapping for fail_over_mac settings @@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond, if (!new_active) return; + if (!old_active) + old_active = bond_get_old_active(bond, new_active); + if (old_active) { ether_addr_copy(tmp_mac, new_active->dev->dev_addr); ether_addr_copy(saddr.sa_data, @@ -1725,9 +1745,16 @@ err_free: err_undo_flags: /* Enslave of first slave has failed and we need to fix master's mac */ - if (!bond_has_slaves(bond) && - ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) - eth_hw_addr_random(bond_dev); + if (!bond_has_slaves(bond)) { + if (ether_addr_equal_64bits(bond_dev->dev_addr, + slave_dev->dev_addr)) + eth_hw_addr_random(bond_dev); + if (bond_dev->type != ARPHRD_ETHER) { + ether_setup(bond_dev); + bond_dev->flags |= IFF_MASTER; + bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; + } + } return res; } @@ -1916,6 +1943,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond %s\n", bond_dev->name); + bond_remove_proc_entry(bond); unregister_netdevice(bond_dev); } return ret; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f4e40aa4d2a2..945c0955a967 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev) cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); } /** @@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) } at91_read_mb(dev, mb, cf); - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); can_led_event(dev, CAN_LED_EVENT_RX); } @@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) return 0; at91_poll_err_frame(dev, cf, reg_sr); - netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev) return; at91_irq_err_state(dev, cf, new_state); - netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_rx(skb); priv->can.state = new_state; } diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 27ad312e7abf..57dadd52b428 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc) cf->data[6 - i] = (6 - i) < cf->can_dlc ? 
(val >> 8) : 0; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) @@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) priv->can.state = state; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index c11d44984036..70a8cbb29e75 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) for (i = 0; i < cf->can_dlc; i++) cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); } - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static int cc770_err(struct net_device *dev, u8 status) @@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status) } } - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 6201c5a1a884..b1e8d729851c 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) return 0; do_bus_err(dev, cf, reg_esr); - netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - netif_receive_skb(skb); - dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev) } flexcan_read_fifo(dev, cf); - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); can_led_event(dev, CAN_LED_EVENT_RX); diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index e3d7e22a4fa0..db9538d4b358 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget) cf->data[i] = (u8)(slot[j] >> shift); } } - netif_receive_skb(skb); /* Update statistics and read pointer */ stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); } diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 32bd7f451aa4..7b92e911a616 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev) /* release receive buffer */ sja1000_write_cmdreg(priv, CMD_RRB); - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); can_led_event(dev, CAN_LED_EVENT_RX); } @@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) can_bus_off(dev); } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index a23a7af8eb9a..9a3f15cb7ef4 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -218,10 +218,10 @@ static void slc_bump(struct slcan *sl) memcpy(skb_put(skb, sizeof(struct can_frame)), &cf, sizeof(struct can_frame)); - netif_rx_ni(skb); 
sl->dev->stats.rx_packets++; sl->dev->stats.rx_bytes += cf.can_dlc; + netif_rx_ni(skb); } /* parse tty input stream */ diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index c1a95a34d62e..b7e83c212023 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi) if (ret) goto out_clk; - priv->power = devm_regulator_get(&spi->dev, "vdd"); - priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); + priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); + priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { ret = -EPROBE_DEFER; @@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev) struct spi_device *spi = to_spi_device(dev); struct mcp251x_priv *priv = spi_get_drvdata(spi); - if (priv->after_suspend & AFTER_SUSPEND_POWER) { + if (priv->after_suspend & AFTER_SUSPEND_POWER) mcp251x_power_enable(priv->power, 1); + + if (priv->after_suspend & AFTER_SUSPEND_UP) { + mcp251x_power_enable(priv->transceiver, 1); queue_work(priv->wq, &priv->restart_work); } else { - if (priv->after_suspend & AFTER_SUSPEND_UP) { - mcp251x_power_enable(priv->transceiver, 1); - queue_work(priv->wq, &priv->restart_work); - } else { - priv->after_suspend = 0; - } + priv->after_suspend = 0; } + priv->force_quit = 0; enable_irq(spi->irq); return 0; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index e95a9e1a889f..cf345cbfe819 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } } - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 866bac0ae7e9..2d390384ef3b 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) cf->data[i] = msg->msg.can_msg.msg[i]; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) @@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) stats->rx_errors++; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } /* diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 411c1af92c62..0e5a4493ba4f 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, cf->data[7] = rxerr; } - netif_rx(skb); - priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } } @@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, cf->data[i] = msg->msg.rx.data[i]; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } return; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 72427f21edff..6b94007ae052 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, 
hwts->hwtstamp = timeval_to_ktime(tv); } - netif_rx(skb); mc->netdev->stats.rx_packets++; mc->netdev->stats.rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } @@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) hwts = skb_hwtstamps(skb); hwts->hwtstamp = timeval_to_ktime(tv); - /* push the skb */ - netif_rx(skb); - /* update statistics */ mc->netdev->stats.rx_packets++; mc->netdev->stats.rx_bytes += cf->can_dlc; + /* push the skb */ + netif_rx(skb); return 0; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index dec51717635e..7d61b3279798 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, hwts = skb_hwtstamps(skb); hwts->hwtstamp = timeval_to_ktime(tv); - netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; + netif_rx(skb); return 0; } @@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); hwts = skb_hwtstamps(skb); hwts->hwtstamp = timeval_to_ktime(tv); - netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index dd52c7a4c80d..de95b1ccba3e 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } /* Read data and status frames */ @@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, else memcpy(cf->data, msg->data, cf->can_dlc); - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); can_led_event(priv->netdev, CAN_LED_EVENT_RX); } else { diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 3297604f8216..289e20443d83 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) } /* Include the pseudo-PHY address and the broadcast PHY address to - * divert reads towards our workaround + * divert reads towards our workaround. This is only required for + * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such + * that we can use the regular SWITCH_MDIO master controller instead. + * + * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask + * to have a 1:1 mapping between Port address and PHY address in order + * to utilize the slave_mii_bus instance to read from Port PHYs. This is + * not what we want here, so we initialize phys_mii_mask 0 to always + * utilize the "master" MDIO bus backed by the "mdio-unimac" driver. 
*/ - ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); + if (of_machine_is_compatible("brcm,bcm7445d0")) + ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); + else + ds->phys_mii_mask = 0; rev = reg_readl(priv, REG_SWITCH_REVISION); priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index f394e4d4d9e0..5158375b7abd 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1175,7 +1175,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) newfid = __ffs(ps->fid_mask); ps->fid[port] = newfid; - ps->fid_mask &= (1 << newfid); + ps->fid_mask &= ~(1 << newfid); ps->bridge_mask[fid] &= ~(1 << port); ps->bridge_mask[newfid] = 1 << port; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index ac27e24264a5..f557a2aaec23 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -1508,16 +1508,7 @@ static void sbmac_channel_start(struct sbmac_softc *s) __raw_writeq(reg, port); port = s->sbm_base + R_MAC_ETHERNET_ADDR; -#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS - /* - * Pass1 SOCs do not receive packets addressed to the - * destination address in the R_MAC_ETHERNET_ADDR register. - * Set the value to zero. - */ - __raw_writeq(0, port); -#else __raw_writeq(reg, port); -#endif /* * Set the receive filter for no packets, and write values diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 42e20e5385ac..1f89c59b4353 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -24,7 +24,6 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> -#include <linux/pm_runtime.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> @@ -78,7 +77,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_RAEM_V 0x8 #define FEC_ENET_RAFL_V 0x8 #define FEC_ENET_OPD_V 0xFFF0 -#define FEC_MDIO_PM_TIMEOUT 100 /* ms */ static struct platform_device_id fec_devtype[] = { { @@ -1769,13 +1767,7 @@ static void fec_enet_adjust_link(struct net_device *ndev) static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct fec_enet_private *fep = bus->priv; - struct device *dev = &fep->pdev->dev; unsigned long time_left; - int ret = 0; - - ret = pm_runtime_get_sync(dev); - if (IS_ERR_VALUE(ret)) - return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1791,30 +1783,18 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO read timeout\n"); - ret = -ETIMEDOUT; - goto out; + return -ETIMEDOUT; } - ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); - -out: - pm_runtime_mark_last_busy(dev); - pm_runtime_put_autosuspend(dev); - - return ret; + /* return value */ + return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); } static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct fec_enet_private *fep = bus->priv; - struct device *dev = &fep->pdev->dev; unsigned long time_left; - int ret = 0; - - ret = pm_runtime_get_sync(dev); - if (IS_ERR_VALUE(ret)) - return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1831,13 +1811,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, if (time_left == 0) { 
fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO write timeout\n"); - ret = -ETIMEDOUT; + return -ETIMEDOUT; } - pm_runtime_mark_last_busy(dev); - pm_runtime_put_autosuspend(dev); - - return ret; + return 0; } static int fec_enet_clk_enable(struct net_device *ndev, bool enable) @@ -1849,6 +1826,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ret = clk_prepare_enable(fep->clk_ahb); if (ret) return ret; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; if (fep->clk_enet_out) { ret = clk_prepare_enable(fep->clk_enet_out); if (ret) @@ -1872,6 +1852,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) } } else { clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { @@ -1893,6 +1874,8 @@ failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); failed_clk_enet_out: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: clk_disable_unprepare(fep->clk_ahb); return ret; @@ -2864,14 +2847,10 @@ fec_enet_open(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int ret; - ret = pm_runtime_get_sync(&fep->pdev->dev); - if (IS_ERR_VALUE(ret)) - return ret; - pinctrl_pm_select_default_state(&fep->pdev->dev); ret = fec_enet_clk_enable(ndev, true); if (ret) - goto clk_enable; + return ret; /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. @@ -2902,9 +2881,6 @@ err_enet_mii_probe: fec_enet_free_buffers(ndev); err_enet_alloc: fec_enet_clk_enable(ndev, false); -clk_enable: - pm_runtime_mark_last_busy(&fep->pdev->dev); - pm_runtime_put_autosuspend(&fep->pdev->dev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); return ret; } @@ -2927,9 +2903,6 @@ fec_enet_close(struct net_device *ndev) fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); - pm_runtime_mark_last_busy(&fep->pdev->dev); - pm_runtime_put_autosuspend(&fep->pdev->dev); - fec_enet_free_buffers(ndev); return 0; @@ -3415,10 +3388,6 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_clk; - ret = clk_prepare_enable(fep->clk_ipg); - if (ret) - goto failed_clk_ipg; - fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { ret = regulator_enable(fep->reg_phy); @@ -3465,8 +3434,6 @@ fec_probe(struct platform_device *pdev) netif_carrier_off(ndev); fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&pdev->dev); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); ret = register_netdev(ndev); if (ret) @@ -3480,12 +3447,6 @@ fec_probe(struct platform_device *pdev) fep->rx_copybreak = COPYBREAK_DEFAULT; INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); - - pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_mark_last_busy(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); - return 0; failed_register: @@ -3496,8 +3457,6 @@ failed_init: if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_regulator: - clk_disable_unprepare(fep->clk_ipg); -failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: failed_phy: @@ -3609,28 +3568,7 @@ failed_clk: return ret; } -static int __maybe_unused fec_runtime_suspend(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct fec_enet_private *fep = netdev_priv(ndev); - - clk_disable_unprepare(fep->clk_ipg); - - return 0; -} - -static int __maybe_unused 
fec_runtime_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct fec_enet_private *fep = netdev_priv(ndev); - - return clk_prepare_enable(fep->clk_ipg); -} - -static const struct dev_pm_ops fec_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) - SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) -}; +static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); static struct platform_driver fec_driver = { .driver = { diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e4fb172d91a6..fe2299ac4f5c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, struct mvneta_rx_queue *rxq) { struct net_device *dev = pp->dev; - int rx_done, rx_filled; + int rx_done; u32 rcvd_pkts = 0; u32 rcvd_bytes = 0; @@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, rx_todo = rx_done; rx_done = 0; - rx_filled = 0; /* Fairness NAPI loop */ while (rx_done < rx_todo) { @@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, int rx_bytes, err; rx_done++; - rx_filled++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); data = (unsigned char *)rx_desc->buf_cookie; @@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, continue; } + /* Refill processing */ + err = mvneta_rx_refill(pp, rx_desc); + if (err) { + netdev_err(dev, "Linux processing - Can't refill\n"); + rxq->missed++; + goto err_drop_frame; + } + skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); if (!skb) goto err_drop_frame; @@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, mvneta_rx_csum(pp, rx_status, skb); napi_gro_receive(&pp->napi, skb); - - /* Refill processing */ - err = mvneta_rx_refill(pp, rx_desc); - if (err) { - netdev_err(dev, "Linux processing - Can't refill\n"); - rxq->missed++; - rx_filled--; - } } if (rcvd_pkts) { @@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, } /* Update rxq management counters */ - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); return rx_done; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 47287c1cc7e0..779bb58a068e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q) struct ravb_desc *desc; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; - struct sk_buff *skb; dma_addr_t dma_addr; - void *buffer; int i; priv->cur_rx[q] = 0; @@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q) memset(priv->rx_ring[q], 0, rx_ring_size); /* Build RX ring buffer */ for (i = 0; i < priv->num_rx_ring[q]; i++) { - priv->rx_skb[q][i] = NULL; - skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); - if (!skb) - break; - ravb_set_buffer_align(skb); /* RX descriptor */ rx_desc = &priv->rx_ring[q][i]; /* The size of the buffer should be on 16-byte boundary. 
*/ rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); - dma_addr = dma_map_single(&ndev->dev, skb->data, + dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data, ALIGN(PKT_BUF_SZ, 16), DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, dma_addr)) { - dev_kfree_skb(skb); - break; - } - priv->rx_skb[q][i] = skb; + /* We just set the data size to 0 for a failed mapping which + * should prevent DMA from happening... + */ + if (dma_mapping_error(&ndev->dev, dma_addr)) + rx_desc->ds_cc = cpu_to_le16(0); rx_desc->dptr = cpu_to_le32(dma_addr); rx_desc->die_dt = DT_FEMPTY; } rx_desc = &priv->rx_ring[q][i]; rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); rx_desc->die_dt = DT_LINKFIX; /* type */ - priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]); memset(priv->tx_ring[q], 0, tx_ring_size); /* Build TX ring buffer */ for (i = 0; i < priv->num_tx_ring[q]; i++) { - priv->tx_skb[q][i] = NULL; - priv->tx_buffers[q][i] = NULL; - buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); - if (!buffer) - break; - /* Aligned TX buffer */ - priv->tx_buffers[q][i] = buffer; tx_desc = &priv->tx_ring[q][i]; tx_desc->die_dt = DT_EEMPTY; } @@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q) static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + struct sk_buff *skb; int ring_size; + void *buffer; + int i; /* Allocate RX and TX skb rings */ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], @@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q) if (!priv->rx_skb[q] || !priv->tx_skb[q]) goto error; + for (i = 0; i < priv->num_rx_ring[q]; i++) { + skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); + if (!skb) + goto error; + ravb_set_buffer_align(skb); + priv->rx_skb[q][i] = skb; + } + /* Allocate rings for the aligned buffers */ priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], sizeof(*priv->tx_buffers[q]), GFP_KERNEL); if (!priv->tx_buffers[q]) goto error; + for (i = 0; i < priv->num_tx_ring[q]; i++) { + buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); + if (!buffer) + goto error; + /* Aligned TX buffer */ + priv->tx_buffers[q][i] = buffer; + } + /* Allocate all RX descriptors. */ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, @@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) if (--boguscnt < 0) break; + /* We use 0-byte descriptors to mark the DMA mapping errors */ + if (!pkt_len) + continue; + if (desc_status & MSC_MC) stats->multicast++; @@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) skb = priv->rx_skb[q][entry]; priv->rx_skb[q][entry] = NULL; - dma_sync_single_for_cpu(&ndev->dev, - le32_to_cpu(desc->dptr), - ALIGN(PKT_BUF_SZ, 16), - DMA_FROM_DEVICE); + dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), + ALIGN(PKT_BUF_SZ, 16), + DMA_FROM_DEVICE); get_ts &= (q == RAVB_NC) ? RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; @@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) if (!skb) break; /* Better luck next round. 
*/ ravb_set_buffer_align(skb); - dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), - ALIGN(PKT_BUF_SZ, 16), - DMA_FROM_DEVICE); dma_addr = dma_map_single(&ndev->dev, skb->data, le16_to_cpu(desc->ds_cc), DMA_FROM_DEVICE); skb_checksum_none_assert(skb); - if (dma_mapping_error(&ndev->dev, dma_addr)) { - dev_kfree_skb_any(skb); - break; - } + /* We just set the data size to 0 for a failed mapping + * which should prevent DMA from happening... + */ + if (dma_mapping_error(&ndev->dev, dma_addr)) + desc->ds_cc = cpu_to_le16(0); desc->dptr = cpu_to_le32(dma_addr); priv->rx_skb[q][entry] = skb; } @@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) u32 dma_addr; void *buffer; u32 entry; - u32 tccr; spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { @@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) dma_wmb(); desc->die_dt = DT_FSINGLE; - tccr = ravb_read(ndev, TCCR); - if (!(tccr & (TCCR_TSRQ0 << q))) - ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR); + ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); priv->cur_tx[q]++; if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 50f7a7a26821..864b476f7fd5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device, if (res->mac) memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); - dev_set_drvdata(device, priv); + dev_set_drvdata(device, priv->dev); /* Verify driver arguments */ stmmac_verify_args(); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f335bf119ab5..d155bf2573cd 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -793,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) static int cpsw_poll(struct napi_struct *napi, int budget) { struct cpsw_priv *priv = napi_to_priv(napi); - int num_tx, num_rx; - - num_tx = cpdma_chan_process(priv->txch, 128); + int num_rx; num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { @@ -810,9 +808,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget) } } - if (num_rx || num_tx) - cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", - num_rx, num_tx); + if (num_rx) + cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx); return num_rx; } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 3e47202b9010..3ca87f26582a 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1617,11 +1617,11 @@ static int netcp_ndo_open(struct net_device *ndev) } mutex_unlock(&netcp_modules_lock); - netcp_rxpool_refill(netcp); napi_enable(&netcp->rx_napi); napi_enable(&netcp->tx_napi); knav_queue_enable_notify(netcp->tx_compl_q); knav_queue_enable_notify(netcp->rx_queue); + netcp_rxpool_refill(netcp); netif_tx_wake_all_queues(ndev); dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); return 0; diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 953a97492fab..9542b7bac61a 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -67,8 +67,6 @@ struct ipvl_dev { struct ipvl_port *port; struct net_device *phy_dev; struct list_head addrs; - int ipv4cnt; - int ipv6cnt; 
struct ipvl_pcpu_stats __percpu *pcpu_stats; DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); netdev_features_t sfeatures; @@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) return rcu_dereference(d->rx_handler_data); } +static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d) +{ + return rcu_dereference_bh(d->rx_handler_data); +} + static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) { return rtnl_dereference(d->rx_handler_data); @@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, const void *iaddr, bool is_v6); -void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); +void ipvlan_ht_addr_del(struct ipvl_addr *addr); #endif /* __IPVLAN_H */ diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8afbedad620d..207f62e8de9a 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); } -void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) +void ipvlan_ht_addr_del(struct ipvl_addr *addr) { hlist_del_init_rcu(&addr->hlnode); - if (sync) - synchronize_rcu(); } struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, @@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); - struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); + struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev); if (!port) goto out; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1acc283160d9..20b58bdecf75 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev) else dev->flags &= ~IFF_NOARP; - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { - list_for_each_entry(addr, &ipvlan->addrs, anode) - ipvlan_ht_addr_add(ipvlan, addr); - } + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_add(ipvlan, addr); + return dev_uc_add(phy_dev, phy_dev->dev_addr); } @@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev) dev_uc_del(phy_dev, phy_dev->dev_addr); - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { - list_for_each_entry(addr, &ipvlan->addrs, anode) - ipvlan_ht_addr_del(addr, !dev->dismantle); - } + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_del(addr); + return 0; } @@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan->port = port; ipvlan->sfeatures = IPVLAN_FEATURES; INIT_LIST_HEAD(&ipvlan->addrs); - ipvlan->ipv4cnt = 0; - ipvlan->ipv6cnt = 0; /* TODO Probably put random address here to be presented to the * world but keep using the physical-dev address for the outgoing @@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) struct ipvl_dev *ipvlan = netdev_priv(dev); struct ipvl_addr *addr, *next; - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { - list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { - ipvlan_ht_addr_del(addr, !dev->dismantle); - list_del(&addr->anode); - } + 
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { + ipvlan_ht_addr_del(addr); + list_del(&addr->anode); + kfree_rcu(addr, rcu); } + list_del_rcu(&ipvlan->pnode); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(ipvlan->phy_dev, dev); @@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); addr->atype = IPVL_IPV6; list_add_tail(&addr->anode, &ipvlan->addrs); - ipvlan->ipv6cnt++; + /* If the interface is not up, the address will be added to the hash * list by ipvlan_open. */ @@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) if (!addr) return; - ipvlan_ht_addr_del(addr, true); + ipvlan_ht_addr_del(addr); list_del(&addr->anode); - ipvlan->ipv6cnt--; - WARN_ON(ipvlan->ipv6cnt < 0); kfree_rcu(addr, rcu); return; @@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused, struct net_device *dev = (struct net_device *)if6->idev->dev; struct ipvl_dev *ipvlan = netdev_priv(dev); + /* FIXME IPv6 autoconf calls us from bh without RTNL */ + if (in_softirq()) + return NOTIFY_DONE; + if (!netif_is_ipvlan(dev)) return NOTIFY_DONE; @@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); addr->atype = IPVL_IPV4; list_add_tail(&addr->anode, &ipvlan->addrs); - ipvlan->ipv4cnt++; + /* If the interface is not up, the address will be added to the hash * list by ipvlan_open. */ @@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) if (!addr) return; - ipvlan_ht_addr_del(addr, true); + ipvlan_ht_addr_del(addr); list_del(&addr->anode); - ipvlan->ipv4cnt--; - WARN_ON(ipvlan->ipv4cnt < 0); kfree_rcu(addr, rcu); return; diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index c7a12e2e07b7..8a3bf5469892 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev) return ret; } - if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || + if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, DP83867_DEVADDR, phydev->addr); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 095ef3fe369a..46a14cbb0215 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) { struct phy_device *phydev = to_phy_device(dev); struct phy_driver *phydrv = to_phy_driver(drv); + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); + int i; if (of_driver_match_device(dev, drv)) return 1; @@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) if (phydrv->match_phy_device) return phydrv->match_phy_device(phydev); - return (phydrv->phy_id & phydrv->phy_id_mask) == - (phydev->phy_id & phydrv->phy_id_mask); + if (phydev->is_c45) { + for (i = 1; i < num_ids; i++) { + if (!(phydev->c45_ids.devices_in_package & (1 << i))) + continue; + + if ((phydrv->phy_id & phydrv->phy_id_mask) == + (phydev->c45_ids.device_ids[i] & + phydrv->phy_id_mask)) + return 1; + } + return 0; + } else { + return (phydrv->phy_id & phydrv->phy_id_mask) == + (phydev->phy_id & phydrv->phy_id_mask); + } } #ifdef CONFIG_PM diff --git 
a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f06c687c5429..1f7a7cd97e50 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -757,6 +757,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ + {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 63c7810e1545..7fbca37a1adf 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev) else vi->hdr_len = sizeof(struct virtio_net_hdr); - if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) + if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || + virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) vi->any_header_sg = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 5e15e8e10ed3..a31a6804dc34 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah) return; case AR9300_DEVID_QCA956X: ah->hw_version.macVersion = AR_SREV_VERSION_9561; + return; } val = REG_READ(ah, AR_SREV) & AR_SREV_ID; diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index d56064861a9c..d45dc021cda2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 80fefe7d7b8c..3b8e85e51002 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev, hw_addr = (const u8 *)(mac_override + MAC_ADDRESS_OVERRIDE_FAMILY_8000); - /* The byte order is little endian 16 bit, meaning 214365 */ - data->hw_addr[0] = hw_addr[1]; - data->hw_addr[1] = hw_addr[0]; - data->hw_addr[2] = hw_addr[3]; - data->hw_addr[3] = hw_addr[2]; - data->hw_addr[4] = hw_addr[5]; - data->hw_addr[5] = hw_addr[4]; + /* + * Store the MAC address from MAO section. + * No byte swapping is required in MAO section + */ + memcpy(data->hw_addr, hw_addr, ETH_ALEN); /* * Force the use of the OTP MAC address in case of reserved MAC diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 5e4cbdb44c60..737774a01c74 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -660,7 +660,8 @@ struct iwl_scan_config { * iwl_umac_scan_flags *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request * can be preempted by other scan requests with higher priority. 
- * The low priority scan is aborted. + * The low priority scan will be resumed when the higher proirity scan is + * completed. *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver * when scan starts. */ diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 5de144968723..5000bfcded61 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->uid = cpu_to_le32(uid); cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); + if (type == IWL_MVM_SCAN_SCHED) + cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); + if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index d68dc697a4a0..26f076e82149 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); u8 sta_id; int ret; + static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; lockdep_assert_held(&mvm->mutex); @@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, end: IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, - sta->addr, ret); + sta ? sta->addr : zero_addr, ret); return ret; } diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index d24b6a83e68c..e472729e5f14 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, { lockdep_assert_held(&mvm->time_event_lock); - if (te_data->id == TE_MAX) + if (!te_data->vif) return; list_del(&te_data->list); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 7ba7a118ff5c..89116864d2a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, if (info->band == IEEE80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) - rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; else rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 2ed1e4d2774d..9f65c1cff1b1 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* 3165 Series */ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, 
iwl7265_2ac_cfg)}, @@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 31f72a61cc3f..376b84e54ad7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -44,15 +44,6 @@ #include "iwl-io.h" #include "iwl-op-mode.h" -/* - * RX related structures and functions - */ -#define RX_NUM_QUEUES 1 -#define RX_POST_REQ_ALLOC 2 -#define RX_CLAIM_REQ_ALLOC 8 -#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) -#define RX_LOW_WATERMARK 8 - struct iwl_host_cmd; /*This file includes the declaration that are internal to the @@ -86,29 +77,29 @@ struct isr_statistics { * struct iwl_rxq - Rx queue * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @pool: + * @queue: * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free - * @used_count: Number of RBDs handled to allocator to use for allocation * @write_actual: - * @rx_free: list of RBDs with allocated RB ready for use - * @rx_used: list of RBDs with no RB attached + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: - * @pool: initial pool of iwl_rx_mem_buffer for the queue - * @queue: actual rx queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { __le32 *bd; dma_addr_t bd_dma; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; u32 read; u32 write; u32 free_count; - u32 used_count; u32 write_actual; struct list_head rx_free; struct list_head rx_used; @@ -116,32 +107,6 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; -}; - -/** - * struct iwl_rb_allocator - Rx allocator - * @pool: initial pool of allocator - * @req_pending: number of requests the allcator had not processed yet - * @req_ready: number of requests honored and ready for claiming - * @rbd_allocated: RBDs with pages allocated and ready to be handled to - * the queue. This is a list of &struct iwl_rx_mem_buffer - * @rbd_empty: RBDs with no page attached for allocator use. 
This is a list - * of &struct iwl_rx_mem_buffer - * @lock: protects the rbd_allocated and rbd_empty lists - * @alloc_wq: work queue for background calls - * @rx_alloc: work struct for background calls - */ -struct iwl_rb_allocator { - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; - atomic_t req_pending; - atomic_t req_ready; - struct list_head rbd_allocated; - struct list_head rbd_empty; - spinlock_t lock; - struct workqueue_struct *alloc_wq; - struct work_struct rx_alloc; }; struct iwl_dma_ptr { @@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data - * @rba: allocator for RX replenishing + * @rx_replenish: work that will be called when buffers need to be allocated * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM @@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) */ struct iwl_trans_pcie { struct iwl_rxq rxq; - struct iwl_rb_allocator rba; + struct work_struct rx_replenish; struct iwl_trans *trans; struct iwl_drv *drv; diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index a3fbaa0ef5e0..adad8d0fae7f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -74,29 +74,16 @@ * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: - * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. - * When the interrupt handler is called, the request is processed. - * The page is either stolen - transferred to the upper layer - * or reused - added immediately to the iwl->rxq->rx_free list. - * + When the page is stolen - the driver updates the matching queue's used - * count, detaches the RBD and transfers it to the queue used list. - * When there are two used RBDs - they are transferred to the allocator empty - * list. Work is then scheduled for the allocator to start allocating - * eight buffers. - * When there are another 6 used RBDs - they are transferred to the allocator - * empty list and the driver tries to claim the pre-allocated buffers and - * add them to iwl->rxq->rx_free. If it fails - it continues to claim them - * until ready. - * When there are 8+ buffers in the free list - either from allocation or from - * 8 reused unstolen pages - restock is called to update the FW and indexes. - * + In order to make sure the allocator always has RBDs to use for allocation - * the allocator has initial pool in the size of num_queues*(8-2) - the - * maximum missing RBDs per allocation request (request posted with 2 - * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). - * The queues supplies the recycle of the rest of the RBDs. + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. 
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. - * + If there are no allocated buffers in iwl->rxq->rx_free, + * + The Host/Firmware iwl->rxq is replenished at irq thread time from the + * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * @@ -105,32 +92,18 @@ * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_pcie_rxq_restock. - * Used only during initialization. + * iwl_pcie_rxq_restock * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates - * the WRITE index. - * iwl_pcie_rx_allocator() Background work for allocating pages. + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_pcie_rx_replenish * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. - * Posts and claims requests to the allocator. * Calls iwl_pcie_rxq_restock to refill any empty * slots. - * - * RBD life-cycle: - * - * Init: - * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue - * - * Regular Receive interrupt: - * Page Stolen: - * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> - * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue - * Page not Stolen: - * rxq.queue -> rxq.rx_free -> rxq.queue * ... * */ @@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) rxq->free_count--; } spin_unlock(&rxq->lock); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + schedule_work(&trans_pcie->rx_replenish); /* If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. */ @@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) } /* - * iwl_pcie_rx_alloc_page - allocates and returns a page. - * - */ -static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct page *page; - gfp_t gfp_mask = GFP_KERNEL; - - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (trans_pcie->rx_page_order > 0) - gfp_mask |= __GFP_COMP; - - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", - trans_pcie->rx_page_order); - /* Issue an error if the hardware has consumed more than half - * of its free buffer list and we don't have enough - * pre-allocated buffers. -` */ - if (rxq->free_count <= RX_LOW_WATERMARK && - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && - net_ratelimit()) - IWL_CRIT(trans, - "Failed to alloc_pages with GFP_KERNEL. 
Only %u free buffers remaining.\n", - rxq->free_count); - return NULL; - } - return page; -} - -/* * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD * * A used RBD is an Rx buffer that has been given to the stack. To use it again @@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; + gfp_t gfp_mask = priority; while (1) { spin_lock(&rxq->lock); @@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) } spin_unlock(&rxq->lock); + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans); - if (!page) + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, " + "order: %d\n", + trans_pcie->rx_page_order); + + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(trans, "Failed to alloc_pages with %s." + "Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? + "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ return; + } spin_lock(&rxq->lock); @@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_QUEUE_SIZE; i++) { + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { if (!rxq->pool[i].page) continue; dma_unmap_page(trans->dev, rxq->pool[i].page_dma, @@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) * When moving to rx_free an page is allocated for the slot. * * Also restock the Rx queue via iwl_pcie_rxq_restock. - * This is called only during initialization + * This is called as a scheduled work item (except for during initialization) */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans) +static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) { - iwl_pcie_rxq_alloc_rbs(trans); + iwl_pcie_rxq_alloc_rbs(trans, gfp); iwl_pcie_rxq_restock(trans); } -/* - * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues - * - * Allocates for each received request 8 pages - * Called as a scheduled work item. 
- */ -static void iwl_pcie_rx_allocator(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - - while (atomic_read(&rba->req_pending)) { - int i; - struct list_head local_empty; - struct list_head local_allocated; - - INIT_LIST_HEAD(&local_allocated); - spin_lock(&rba->lock); - /* swap out the entire rba->rbd_empty to a local list */ - list_replace_init(&rba->rbd_empty, &local_empty); - spin_unlock(&rba->lock); - - for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { - struct iwl_rx_mem_buffer *rxb; - struct page *page; - - /* List should never be empty - each reused RBD is - * returned to the list, and initial pool covers any - * possible gap between the time the page is allocated - * to the time the RBD is added. - */ - BUG_ON(list_empty(&local_empty)); - /* Get the first rxb from the rbd list */ - rxb = list_first_entry(&local_empty, - struct iwl_rx_mem_buffer, list); - BUG_ON(rxb->page); - - /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans); - if (!page) - continue; - rxb->page = page; - - /* Get physical address of the RB */ - rxb->page_dma = dma_map_page(trans->dev, page, 0, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - if (dma_mapping_error(trans->dev, rxb->page_dma)) { - rxb->page = NULL; - __free_pages(page, trans_pcie->rx_page_order); - continue; - } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); - - /* move the allocated entry to the out list */ - list_move(&rxb->list, &local_allocated); - i++; - } - - spin_lock(&rba->lock); - /* add the allocated rbds to the allocator allocated list */ - list_splice_tail(&local_allocated, &rba->rbd_allocated); - /* add the unused rbds back to the allocator empty list */ - list_splice_tail(&local_empty, &rba->rbd_empty); - spin_unlock(&rba->lock); - - atomic_dec(&rba->req_pending); - atomic_inc(&rba->req_ready); - } -} - -/* - * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages -.* -.* Called by queue when the queue posted allocation request and - * has freed 8 RBDs in order to restock itself. 
- */ -static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, - struct iwl_rx_mem_buffer - *out[RX_CLAIM_REQ_ALLOC]) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; - - if (atomic_dec_return(&rba->req_ready) < 0) { - atomic_inc(&rba->req_ready); - IWL_DEBUG_RX(trans, - "Allocation request not ready, pending requests = %d\n", - atomic_read(&rba->req_pending)); - return -ENOMEM; - } - - spin_lock(&rba->lock); - for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { - /* Get next free Rx buffer, remove it from free list */ - out[i] = list_first_entry(&rba->rbd_allocated, - struct iwl_rx_mem_buffer, list); - list_del(&out[i]->list); - } - spin_unlock(&rba->lock); - - return 0; -} - -static void iwl_pcie_rx_allocator_work(struct work_struct *data) +static void iwl_pcie_rx_replenish_work(struct work_struct *data) { - struct iwl_rb_allocator *rba_p = - container_of(data, struct iwl_rb_allocator, rx_alloc); struct iwl_trans_pcie *trans_pcie = - container_of(rba_p, struct iwl_trans_pcie, rba); + container_of(data, struct iwl_trans_pcie, rx_replenish); - iwl_pcie_rx_allocator(trans_pcie->trans); + iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); - spin_lock_init(&rba->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; @@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; - rxq->used_count = 0; - for (i = 0; i < RX_QUEUE_SIZE; i++) + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) list_add(&rxq->pool[i].list, &rxq->rx_used); } -static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) -{ - int i; - - lockdep_assert_held(&rba->lock); - - INIT_LIST_HEAD(&rba->rbd_allocated); - INIT_LIST_HEAD(&rba->rbd_empty); - - for (i = 0; i < RX_POOL_SIZE; i++) - list_add(&rba->pool[i].list, &rba->rbd_empty); -} - -static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; - - lockdep_assert_held(&rba->lock); - - for (i = 0; i < RX_POOL_SIZE; i++) { - if (!rba->pool[i].page) - continue; - dma_unmap_page(trans->dev, rba->pool[i].page_dma, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); - rba->pool[i].page = NULL; - } -} - int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err; if (!rxq->bd) { @@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (err) return err; } - if (!rba->alloc_wq) - rba->alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); - INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); - - spin_lock(&rba->lock); - atomic_set(&rba->req_pending, 0); - atomic_set(&rba->req_ready, 0); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rx_free_rba(trans); - iwl_pcie_rx_init_rba(rba); - spin_unlock(&rba->lock); spin_lock(&rxq->lock); 
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); + /* free all first - we might be reconfigured for a different size */ iwl_pcie_rxq_free_rbs(trans); iwl_pcie_rx_init_rxb_lists(rxq); @@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans); + iwl_pcie_rx_replenish(trans, GFP_KERNEL); iwl_pcie_rx_hw_init(trans, rxq); @@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; /*if rxq->bd is NULL, it means that nothing has been allocated, * exit now */ @@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) return; } - cancel_work_sync(&rba->rx_alloc); - if (rba->alloc_wq) { - destroy_workqueue(rba->alloc_wq); - rba->alloc_wq = NULL; - } - - spin_lock(&rba->lock); - iwl_pcie_rx_free_rba(trans); - spin_unlock(&rba->lock); + cancel_work_sync(&trans_pcie->rx_replenish); spin_lock(&rxq->lock); iwl_pcie_rxq_free_rbs(trans); @@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->rb_stts = NULL; } -/* - * iwl_pcie_rx_reuse_rbd - Recycle used RBDs - * - * Called when a RBD can be reused. The RBD is transferred to the allocator. - * When there are 2 empty RBDs - a request for allocation is posted - */ -static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, - struct iwl_rx_mem_buffer *rxb, - struct iwl_rxq *rxq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - - /* Count the used RBDs */ - rxq->used_count++; - - /* Move the RBD to the used list, will be moved to allocator in batches - * before claiming or posting a request*/ - list_add_tail(&rxb->list, &rxq->rx_used); - - /* If we have RX_POST_REQ_ALLOC new released rx buffers - - * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is - * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, - * after but we still need to post another request. - */ - if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { - /* Move the 2 RBDs to the allocator ownership. 
- Allocator has another 6 from pool for the request completion*/ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); - - atomic_inc(&rba->req_pending); - queue_work(rba->alloc_wq, &rba->rx_alloc); - } -} - static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb) { @@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); + list_add_tail(&rxb->list, &rxq->rx_used); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); + list_add_tail(&rxb->list, &rxq->rx_used); } /* @@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i, j; + u32 r, i; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; restart: spin_lock(&rxq->lock); @@ -957,6 +720,14 @@ restart: if (i == r) IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + while (i != r) { struct iwl_rx_mem_buffer *rxb; @@ -968,48 +739,29 @@ restart: iwl_pcie_rx_handle_rb(trans, rxb); i = (i + 1) & RX_QUEUE_MASK; - - /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - - * try to claim the pre-allocated buffers from the allocator */ - if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { - struct iwl_rb_allocator *rba = &trans_pcie->rba; - struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; - - /* Add the remaining 6 empty RBDs for allocator use */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); - - /* If not ready - continue, will try to reclaim later. - * No need to reschedule work - allocator exits only on - * success */ - if (!iwl_pcie_rx_allocator_get(trans, out)) { - /* If success - then RX_CLAIM_REQ_ALLOC - * buffers were retrieved and should be added - * to free list */ - rxq->used_count -= RX_CLAIM_REQ_ALLOC; - for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { - list_add_tail(&out[j]->list, - &rxq->rx_free); - rxq->free_count++; - } + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. 
*/ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); + count = 0; + goto restart; } } - /* handle restock for two cases: - * - we just pulled buffers from the allocator - * - we have 8+ unstolen pages accumulated */ - if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { - rxq->read = i; - spin_unlock(&rxq->lock); - iwl_pcie_rxq_restock(trans); - goto restart; - } } /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); + if (fill_rx) + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); + else + iwl_pcie_rxq_restock(trans); + if (trans_pcie->napi.poll) napi_gro_flush(&trans_pcie->napi, false); } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 43ae658af6ec..6203c4ad9bba 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { - if (!trans->cfg->apmg_not_supported) + if (trans->cfg->apmg_not_supported) return; if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) @@ -2459,7 +2459,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; u16 pci_cmd; - int err; + int ret; trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg, &trans_ops_pcie, 0); @@ -2474,8 +2474,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, spin_lock_init(&trans_pcie->ref_lock); init_waitqueue_head(&trans_pcie->ucode_write_waitq); - err = pci_enable_device(pdev); - if (err) + ret = pci_enable_device(pdev); + if (ret) goto out_no_pci; if (!cfg->base_params->pcie_l1_allowed) { @@ -2491,23 +2491,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); - if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (ret) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* both attempts failed: */ - if (err) { + if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto out_pci_disable_device; } } - err = pci_request_regions(pdev, DRV_NAME); - if (err) { + ret = pci_request_regions(pdev, DRV_NAME); + if (ret) { dev_err(&pdev->dev, "pci_request_regions failed\n"); goto out_pci_disable_device; } @@ -2515,7 +2515,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); if (!trans_pcie->hw_base) { dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); - err = -ENODEV; + ret = -ENODEV; goto out_pci_release_regions; } @@ -2527,9 +2527,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); - err = pci_enable_msi(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); + ret = pci_enable_msi(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { @@ -2547,11 +2547,16 @@ 
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { unsigned long flags; - int ret; trans->hw_rev = (trans->hw_rev & 0xfff0) | (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); + ret = iwl_pcie_prepare_card_hw(trans); + if (ret) { + IWL_WARN(trans, "Exit HW not ready\n"); + goto out_pci_disable_msi; + } + /* * in-order to recognize C step driver should read chip version * id located at the AUX bus MISC address space. @@ -2591,13 +2596,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); - if (iwl_pcie_alloc_ict(trans)) + ret = iwl_pcie_alloc_ict(trans); + if (ret) goto out_pci_disable_msi; - err = request_threaded_irq(pdev->irq, iwl_pcie_isr, + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, iwl_pcie_irq_handler, IRQF_SHARED, DRV_NAME, trans); - if (err) { + if (ret) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; } @@ -2617,5 +2623,5 @@ out_pci_disable_device: pci_disable_device(pdev); out_no_pci: iwl_trans_free(trans); - return ERR_PTR(err); + return ERR_PTR(ret); } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 880d0d63e872..7d50711476fe 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1566,13 +1566,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) smp_rmb(); while (dc != dp) { - BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); + BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); pending_idx = queue->dealloc_ring[pending_index(dc++)]; - pending_idx_release[gop-queue->tx_unmap_ops] = + pending_idx_release[gop - queue->tx_unmap_ops] = pending_idx; - queue->pages_to_unmap[gop-queue->tx_unmap_ops] = + queue->pages_to_unmap[gop - queue->tx_unmap_ops] = queue->mmap_pages[pending_idx]; gnttab_set_unmap_op(gop, idx_to_kaddr(queue, pending_idx), diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index efcf2a2b3975..6177315ab74e 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data) spin_lock_irqsave(&pc->irq_lock[bank], flags); bcm2835_gpio_irq_config(pc, gpio, false); + /* Clear events that were latched prior to clearing event sources */ + bcm2835_gpio_set_bit(pc, GPEDS0, gpio); clear_bit(offset, &pc->enabled_irq_map[bank]); spin_unlock_irqrestore(&pc->irq_lock[bank], flags); } diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 5fd4437cee15..88a7fac11bd4 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev, unsigned num_configs) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); - const struct imx1_pinctrl_soc_info *info = ipctl->info; int i; for (i = 0; i != num_configs; ++i) { imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", - info->pins[pin_id].name); + pin_desc_get(pctldev, pin_id)->name); } return 0; diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c index 557d0f2a3031..97681fac082e 100644 --- a/drivers/pinctrl/nomadik/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c @@ -787,7 
+787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = { .set_mux = abx500_pmx_set, .gpio_request_enable = abx500_gpio_request_enable, .gpio_disable_free = abx500_gpio_disable_free, - .strict = true, }; static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev) diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c index ef0b697639a7..347c763a6a78 100644 --- a/drivers/pinctrl/pinctrl-lpc18xx.c +++ b/drivers/pinctrl/pinctrl-lpc18xx.c @@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - if (param) + if (param_val) *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); else *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); @@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - if (param) + if (param_val) *reg &= ~LPC18XX_SCU_PIN_ZIF; else *reg |= LPC18XX_SCU_PIN_ZIF; diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index b2de09d3b1a0..0b8d480171a3 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs, int res; res = request_irq(pcs_soc->irq, pcs_irq_handler, - IRQF_SHARED | IRQF_NO_SUSPEND, + IRQF_SHARED | IRQF_NO_SUSPEND | + IRQF_NO_THREAD, name, pcs_soc); if (res) { pcs_soc->irq = -1; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 3dd5a3b2ac62..c760bf43d116 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -33,11 +33,6 @@ #include "../core.h" #include "pinctrl-samsung.h" -#define GROUP_SUFFIX "-grp" -#define GSUFFIX_LEN sizeof(GROUP_SUFFIX) -#define FUNCTION_SUFFIX "-mux" -#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX) - /* list of all possible config options supported */ static struct pin_config { const char *property; diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index c7508d5f6886..0874cfee6889 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -224,7 +224,7 @@ struct sh_pfc_soc_info { /* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ #define _GP_GPIO(bank, _pin, _name, sfx) \ - [(bank * 32) + _pin] = { \ + { \ .pin = (bank * 32) + _pin, \ .name = __stringify(_name), \ .enum_id = _name##_DATA, \ diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c index f87a5eaf75da..0afaf79a4e51 100644 --- a/drivers/pinctrl/spear/pinctrl-spear.c +++ b/drivers/pinctrl/spear/pinctrl-spear.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * Inspired from: * - U300 Pinctl drivers diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h index dc8bf85ecb2a..27c2cc8d83ad 100644 --- a/drivers/pinctrl/spear/pinctrl-spear.h +++ b/drivers/pinctrl/spear/pinctrl-spear.h @@ -2,7 +2,7 @@ * Driver header file for the ST Microelectronics SPEAr pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c index a7bdc537efa7..92611bb757ac 100644 --- a/drivers/pinctrl/spear/pinctrl-spear1310.c +++ b/drivers/pinctrl/spear/pinctrl-spear1310.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr1310 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -2730,7 +2730,7 @@ static void __exit spear1310_pinctrl_exit(void) } module_exit(spear1310_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c index f43ec85a0328..f842e9dc40d0 100644 --- a/drivers/pinctrl/spear/pinctrl-spear1340.c +++ b/drivers/pinctrl/spear/pinctrl-spear1340.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr1340 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -2046,7 +2046,7 @@ static void __exit spear1340_pinctrl_exit(void) } module_exit(spear1340_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c index da8990a8eeef..d998a2ccff48 100644 --- a/drivers/pinctrl/spear/pinctrl-spear300.c +++ b/drivers/pinctrl/spear/pinctrl-spear300.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr300 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -703,7 +703,7 @@ static void __exit spear300_pinctrl_exit(void) } module_exit(spear300_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c index 31ede51e819b..609b18aceb16 100644 --- a/drivers/pinctrl/spear/pinctrl-spear310.c +++ b/drivers/pinctrl/spear/pinctrl-spear310.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr310 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any @@ -426,7 +426,7 @@ static void __exit spear310_pinctrl_exit(void) } module_exit(spear310_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c index 506e40b641e0..c07114431bd4 100644 --- a/drivers/pinctrl/spear/pinctrl-spear320.c +++ b/drivers/pinctrl/spear/pinctrl-spear320.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr320 pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any @@ -3467,7 +3467,7 @@ static void __exit spear320_pinctrl_exit(void) } module_exit(spear320_pinctrl_exit); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match); diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c index 12ee21af766b..d3119aafe709 100644 --- a/drivers/pinctrl/spear/pinctrl-spear3xx.c +++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c @@ -2,7 +2,7 @@ * Driver for the ST Microelectronics SPEAr3xx pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h index 7860b36053c4..ce19dcf8f08b 100644 --- a/drivers/pinctrl/spear/pinctrl-spear3xx.h +++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h @@ -2,7 +2,7 @@ * Header file for the ST Microelectronics SPEAr3xx pinmux * * Copyright (C) 2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index ed317ccac4a2..aaeeae81e3a9 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -309,12 +309,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = { static struct calling_interface_buffer *buffer; static DEFINE_MUTEX(buffer_mutex); -static int hwswitch_state; +static void clear_buffer(void) +{ + memset(buffer, 0, sizeof(struct calling_interface_buffer)); +} static void get_buffer(void) { mutex_lock(&buffer_mutex); - memset(buffer, 0, sizeof(struct calling_interface_buffer)); + clear_buffer(); } static void release_buffer(void) @@ -548,21 +551,41 @@ static int dell_rfkill_set(void *data, bool blocked) int disable = blocked ? 
1 : 0; unsigned long radio = (unsigned long)data; int hwswitch_bit = (unsigned long)data - 1; + int hwswitch; + int status; + int ret; get_buffer(); + + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + status = buffer->output[1]; + + if (ret != 0) + goto out; + + clear_buffer(); + + buffer->input[0] = 0x2; dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + hwswitch = buffer->output[1]; /* If the hardware switch controls this radio, and the hardware switch is disabled, always disable the radio */ - if ((hwswitch_state & BIT(hwswitch_bit)) && - !(buffer->output[1] & BIT(16))) + if (ret == 0 && (hwswitch & BIT(hwswitch_bit)) && + (status & BIT(0)) && !(status & BIT(16))) disable = 1; + clear_buffer(); + buffer->input[0] = (1 | (radio<<8) | (disable << 16)); dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + out: release_buffer(); - return 0; + return dell_smi_error(ret); } /* Must be called with the buffer held */ @@ -572,6 +595,7 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio, if (status & BIT(0)) { /* Has hw-switch, sync sw_state to BIOS */ int block = rfkill_blocked(rfkill); + clear_buffer(); buffer->input[0] = (1 | (radio << 8) | (block << 16)); dell_send_request(buffer, 17, 11); } else { @@ -581,23 +605,43 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio, } static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio, - int status) + int status, int hwswitch) { - if (hwswitch_state & (BIT(radio - 1))) + if (hwswitch & (BIT(radio - 1))) rfkill_set_hw_state(rfkill, !(status & BIT(16))); } static void dell_rfkill_query(struct rfkill *rfkill, void *data) { + int radio = ((unsigned long)data & 0xF); + int hwswitch; int status; + int ret; get_buffer(); + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; - dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status); + if (ret != 0 || !(status & BIT(0))) { + release_buffer(); + return; + } + + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + hwswitch = buffer->output[1]; release_buffer(); + + if (ret != 0) + return; + + dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch); } static const struct rfkill_ops dell_rfkill_ops = { @@ -609,13 +653,27 @@ static struct dentry *dell_laptop_dir; static int dell_debugfs_show(struct seq_file *s, void *data) { + int hwswitch_state; + int hwswitch_ret; int status; + int ret; get_buffer(); + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; + + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + hwswitch_ret = buffer->output[0]; + hwswitch_state = buffer->output[1]; + release_buffer(); + seq_printf(s, "return:\t%d\n", ret); seq_printf(s, "status:\t0x%X\n", status); seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n", status & BIT(0)); @@ -657,7 +715,8 @@ static int dell_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "Bit 21: WiGig is blocked: %lu\n", (status & BIT(21)) >> 21); - seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state); + seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret); + seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state); seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n", hwswitch_state & BIT(0)); seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n", @@ -693,25 +752,43 @@ static const struct file_operations dell_debugfs_fops = { static void 
dell_update_rfkill(struct work_struct *ignored) { + int hwswitch = 0; int status; + int ret; get_buffer(); + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; + if (ret != 0) + goto out; + + clear_buffer(); + + buffer->input[0] = 0x2; + dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; + + if (ret == 0 && (status & BIT(0))) + hwswitch = buffer->output[1]; + if (wifi_rfkill) { - dell_rfkill_update_hw_state(wifi_rfkill, 1, status); + dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch); dell_rfkill_update_sw_state(wifi_rfkill, 1, status); } if (bluetooth_rfkill) { - dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status); + dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status, + hwswitch); dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status); } if (wwan_rfkill) { - dell_rfkill_update_hw_state(wwan_rfkill, 3, status); + dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch); dell_rfkill_update_sw_state(wwan_rfkill, 3, status); } + out: release_buffer(); } static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); @@ -773,21 +850,17 @@ static int __init dell_setup_rfkill(void) get_buffer(); dell_send_request(buffer, 17, 11); + ret = buffer->output[0]; status = buffer->output[1]; - buffer->input[0] = 0x2; - dell_send_request(buffer, 17, 11); - hwswitch_state = buffer->output[1]; release_buffer(); - if (!(status & BIT(0))) { - if (force_rfkill) { - /* No hwsitch, clear all hw-controlled bits */ - hwswitch_state &= ~7; - } else { - /* rfkill is only tested on laptops with a hwswitch */ - return 0; - } - } + /* dell wireless info smbios call is not supported */ + if (ret != 0) + return 0; + + /* rfkill is only tested on laptops with a hwswitch */ + if (!(status & BIT(0)) && !force_rfkill) + return 0; if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev, @@ -932,47 +1005,50 @@ static void dell_cleanup_rfkill(void) static int dell_send_intensity(struct backlight_device *bd) { - int ret = 0; + int token; + int ret; + + token = find_token_location(BRIGHTNESS_TOKEN); + if (token == -1) + return -ENODEV; get_buffer(); - buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); + buffer->input[0] = token; buffer->input[1] = bd->props.brightness; - if (buffer->input[0] == -1) { - ret = -ENODEV; - goto out; - } - if (power_supply_is_system_supplied() > 0) dell_send_request(buffer, 1, 2); else dell_send_request(buffer, 1, 1); - out: + ret = dell_smi_error(buffer->output[0]); + release_buffer(); return ret; } static int dell_get_intensity(struct backlight_device *bd) { - int ret = 0; + int token; + int ret; - get_buffer(); - buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); + token = find_token_location(BRIGHTNESS_TOKEN); + if (token == -1) + return -ENODEV; - if (buffer->input[0] == -1) { - ret = -ENODEV; - goto out; - } + get_buffer(); + buffer->input[0] = token; if (power_supply_is_system_supplied() > 0) dell_send_request(buffer, 0, 2); else dell_send_request(buffer, 0, 1); - ret = buffer->output[1]; + if (buffer->output[0]) + ret = dell_smi_error(buffer->output[0]); + else + ret = buffer->output[1]; - out: release_buffer(); return ret; } @@ -2036,6 +2112,7 @@ static void kbd_led_exit(void) static int __init dell_init(void) { int max_intensity = 0; + int token; int ret; if (!dmi_check_system(dell_device_table)) @@ -2094,13 +2171,15 @@ static int __init dell_init(void) if (acpi_video_get_backlight_type() != acpi_backlight_vendor) return 0; - get_buffer(); - 
buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); - if (buffer->input[0] != -1) { + token = find_token_location(BRIGHTNESS_TOKEN); + if (token != -1) { + get_buffer(); + buffer->input[0] = token; dell_send_request(buffer, 0, 2); - max_intensity = buffer->output[3]; + if (buffer->output[0] == 0) + max_intensity = buffer->output[3]; + release_buffer(); } - release_buffer(); if (max_intensity) { struct backlight_properties props; diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index d734763dab69..105cfffe82c6 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -96,18 +96,18 @@ static struct intel_pmc_ipc_dev { struct completion cmd_complete; /* The following PMC BARs share the same ACPI device with the IPC */ - void *acpi_io_base; + resource_size_t acpi_io_base; int acpi_io_size; struct platform_device *tco_dev; /* gcr */ - void *gcr_base; + resource_size_t gcr_base; int gcr_size; /* punit */ - void *punit_base; + resource_size_t punit_base; int punit_size; - void *punit_base2; + resource_size_t punit_base2; int punit_size2; struct platform_device *punit_dev; } ipcdev; @@ -210,10 +210,15 @@ static int intel_pmc_ipc_check_status(void) return ret; } -/* - * intel_pmc_ipc_simple_command - * @cmd: command - * @sub: sub type +/** + * intel_pmc_ipc_simple_command() - Simple IPC command + * @cmd: IPC command code. + * @sub: IPC command sub type. + * + * Send a simple IPC command to PMC when don't need to specify + * input/output data and source/dest pointers. + * + * Return: an IPC error code or 0 on success. */ int intel_pmc_ipc_simple_command(int cmd, int sub) { @@ -232,16 +237,20 @@ int intel_pmc_ipc_simple_command(int cmd, int sub) } EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); -/* - * intel_pmc_ipc_raw_cmd - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords - * @sptr: data writing to SPTR register - * @dptr: data writing to DPTR register +/** + * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers + * @cmd: IPC command code. + * @sub: IPC command sub type. + * @in: input data of this IPC command. + * @inlen: input data length in bytes. + * @out: output data of this IPC command. + * @outlen: output data length in dwords. + * @sptr: data writing to SPTR register. + * @dptr: data writing to DPTR register. + * + * Send an IPC command to PMC with input/output data and source/dest pointers. + * + * Return: an IPC error code or 0 on success. */ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen, u32 dptr, u32 sptr) @@ -278,14 +287,18 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, } EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd); -/* - * intel_pmc_ipc_command - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in bytes - * @out: output data - * @outlen: output length in dwords +/** + * intel_pmc_ipc_command() - IPC command with input/output data + * @cmd: IPC command code. + * @sub: IPC command sub type. + * @in: input data of this IPC command. + * @inlen: input data length in bytes. + * @out: output data of this IPC command. + * @outlen: output data length in dwords. + * + * Send an IPC command to PMC with input/output data. + * + * Return: an IPC error code or 0 on success. 
*/ int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, u32 outlen) @@ -480,11 +493,11 @@ static int ipc_create_punit_device(void) pdev->dev.parent = ipcdev.dev; res = punit_res; - res->start = (resource_size_t)ipcdev.punit_base; + res->start = ipcdev.punit_base; res->end = res->start + ipcdev.punit_size - 1; res = punit_res + PUNIT_RESOURCE_INTER; - res->start = (resource_size_t)ipcdev.punit_base2; + res->start = ipcdev.punit_base2; res->end = res->start + ipcdev.punit_size2 - 1; ret = platform_device_add_resources(pdev, punit_res, @@ -522,15 +535,15 @@ static int ipc_create_tco_device(void) pdev->dev.parent = ipcdev.dev; res = tco_res + TCO_RESOURCE_ACPI_IO; - res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET; + res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET; res->end = res->start + TCO_REGS_SIZE - 1; res = tco_res + TCO_RESOURCE_SMI_EN_IO; - res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET; + res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET; res->end = res->start + SMI_EN_SIZE - 1; res = tco_res + TCO_RESOURCE_GCR_MEM; - res->start = (resource_size_t)ipcdev.gcr_base; + res->start = ipcdev.gcr_base; res->end = res->start + ipcdev.gcr_size - 1; ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res)); @@ -589,7 +602,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = resource_size(res); - ipcdev.acpi_io_base = (void *)res->start; + ipcdev.acpi_io_base = res->start; ipcdev.acpi_io_size = size; dev_info(&pdev->dev, "io res: %llx %x\n", (long long)res->start, (int)resource_size(res)); @@ -601,7 +614,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = resource_size(res); - ipcdev.punit_base = (void *)res->start; + ipcdev.punit_base = res->start; ipcdev.punit_size = size; dev_info(&pdev->dev, "punit data res: %llx %x\n", (long long)res->start, (int)resource_size(res)); @@ -613,7 +626,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) return -ENXIO; } size = resource_size(res); - ipcdev.punit_base2 = (void *)res->start; + ipcdev.punit_base2 = res->start; ipcdev.punit_size2 = size; dev_info(&pdev->dev, "punit interface res: %llx %x\n", (long long)res->start, (int)resource_size(res)); @@ -637,7 +650,7 @@ static int ipc_plat_get_res(struct platform_device *pdev) } ipcdev.ipc_base = addr; - ipcdev.gcr_base = (void *)(res->start + size); + ipcdev.gcr_base = res->start + size; ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE; dev_info(&pdev->dev, "ipc res: %llx %x\n", (long long)res->start, (int)resource_size(res)); diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 001b199a8c33..187d1086d15c 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -216,13 +216,13 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) int nc; u32 offset = 0; int err; - u8 cbuf[IPC_WWBUF_SIZE] = { }; + u8 cbuf[IPC_WWBUF_SIZE]; u32 *wbuf = (u32 *)&cbuf; - mutex_lock(&ipclock); - memset(cbuf, 0, sizeof(cbuf)); + mutex_lock(&ipclock); + if (ipcdev.pdev == NULL) { mutex_unlock(&ipclock); return -ENODEV; diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c index 4b62d1a875e4..2b08cac62f07 100644 --- a/drivers/rtc/rtc-armada38x.c +++ b/drivers/rtc/rtc-armada38x.c @@ -88,7 +88,7 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct armada38x_rtc *rtc = dev_get_drvdata(dev); int ret = 0; - unsigned long time, flags; + unsigned 
long time; ret = rtc_tm_to_time(tm, &time); diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index c0090b698ff3..eab230be5a54 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -343,6 +343,8 @@ static int mtk_rtc_probe(struct platform_device *pdev) goto out_dispose_irq; } + device_init_wakeup(&pdev->dev, 1); + rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev, &mtk_rtc_ops, THIS_MODULE); if (IS_ERR(rtc->rtc_dev)) { @@ -351,8 +353,6 @@ static int mtk_rtc_probe(struct platform_device *pdev) goto out_free_irq; } - device_init_wakeup(&pdev->dev, 1); - return 0; out_free_irq: diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1aec8ff0b587..f73d2f579a7e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1863,6 +1863,33 @@ static void __dasd_device_check_expire(struct dasd_device *device) } /* + * return 1 when device is not eligible for IO + */ +static int __dasd_device_is_unusable(struct dasd_device *device, + struct dasd_ccw_req *cqr) +{ + int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); + + if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { + /* dasd is being set offline. */ + return 1; + } + if (device->stopped) { + if (device->stopped & mask) { + /* stopped and CQR will not change that. */ + return 1; + } + if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { + /* CQR is not able to change device to + * operational. */ + return 1; + } + /* CQR required to get device operational. */ + } + return 0; +} + +/* * Take a look at the first request on the ccw queue and check * if it needs to be started. */ @@ -1876,13 +1903,8 @@ static void __dasd_device_start_head(struct dasd_device *device) cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); if (cqr->status != DASD_CQR_QUEUED) return; - /* when device is stopped, return request to previous layer - * exception: only the disconnect or unresumed bits are set and the - * cqr is a path verification request - */ - if (device->stopped && - !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) - && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) { + /* if device is not usable return request to upper layer */ + if (__dasd_device_is_unusable(device, cqr)) { cqr->intrc = -EAGAIN; cqr->status = DASD_CQR_CLEARED; dasd_schedule_device_bh(device); diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index a2597e683e79..ee3a6faae22a 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -699,7 +699,8 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) struct dasd_device, alias_list); spin_unlock_irqrestore(&lcu->lock, flags); alias_priv = (struct dasd_eckd_private *) alias_device->private; - if ((alias_priv->count < private->count) && !alias_device->stopped) + if ((alias_priv->count < private->count) && !alias_device->stopped && + !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags)) return alias_device; else return NULL; diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index aeed7969fd79..7bc6df3100ef 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -7,6 +7,7 @@ #define KMSG_COMPONENT "sclp_early" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include <linux/errno.h> #include <asm/ctl_reg.h> #include <asm/sclp.h> #include <asm/ipl.h> diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 08f1830cbfc4..01bf1f5cf2e9 100644 --- a/drivers/s390/crypto/zcrypt_api.c 
+++ b/drivers/s390/crypto/zcrypt_api.c @@ -54,6 +54,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ "Copyright IBM Corp. 2001, 2012"); MODULE_LICENSE("GPL"); +static int zcrypt_hwrng_seed = 1; +module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); +MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); + static DEFINE_SPINLOCK(zcrypt_device_lock); static LIST_HEAD(zcrypt_device_list); static int zcrypt_device_count = 0; @@ -1373,6 +1377,7 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) static struct hwrng zcrypt_rng_dev = { .name = "zcrypt", .data_read = zcrypt_rng_data_read, + .quality = 990, }; static int zcrypt_rng_device_add(void) @@ -1387,6 +1392,8 @@ static int zcrypt_rng_device_add(void) goto out; } zcrypt_rng_buffer_index = 0; + if (!zcrypt_hwrng_seed) + zcrypt_rng_dev.quality = 0; rc = hwrng_register(&zcrypt_rng_dev); if (rc) goto out_free; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 1ac38e73df7e..9ad41168d26d 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, depth = simple_strtoul(buf, NULL, 0); - if (depth < 1 || depth > sht->can_queue) + if (depth < 1 || depth > sdev->host->can_queue) return -EINVAL; retval = sht->change_queue_depth(sdev, depth); diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index a85292b1d09d..e3cd3ece4412 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -203,7 +203,7 @@ static ssize_t srp_show_tmo(char *buf, int tmo) return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); } -static int srp_parse_tmo(int *tmo, const char *buf) +int srp_parse_tmo(int *tmo, const char *buf) { int res = 0; @@ -214,6 +214,7 @@ static int srp_parse_tmo(int *tmo, const char *buf) return res; } +EXPORT_SYMBOL(srp_parse_tmo); static ssize_t show_reconnect_delay(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 3f25b8fa921d..871f3553987d 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -1329,9 +1329,9 @@ static int st_open(struct inode *inode, struct file *filp) spin_lock(&st_use_lock); STp->in_use = 0; spin_unlock(&st_use_lock); - scsi_tape_put(STp); if (resumed) scsi_autopm_put_device(STp->device); + scsi_tape_put(STp); return retval; } diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig index b8ee81840666..3f287c48e082 100644 --- a/drivers/staging/board/Kconfig +++ b/drivers/staging/board/Kconfig @@ -1,6 +1,6 @@ config STAGING_BOARD bool "Staging Board Support" - depends on OF_ADDRESS + depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP help Select to enable per-board staging support code. 
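[Editorial note] The zcrypt hunk above adds a module parameter that gates whether the zcrypt hwrng source is credited for automatic entropy seeding, by zeroing the hwrng "quality" field when seeding is disabled. Below is a minimal, self-contained sketch of that same pattern for a generic driver; it is only an illustration, not the zcrypt code. The "example_*" names, the placeholder read callback, and the constant it returns are invented for the sketch, while the hwrng fields and module_param_named() usage mirror what the hunk itself relies on.

#include <linux/module.h>
#include <linux/hw_random.h>

static int example_hwrng_seed = 1;	/* 1 = allow auto-seeding (default) */
module_param_named(hwrng_seed, example_hwrng_seed, int, S_IRUSR | S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

static int example_rng_data_read(struct hwrng *rng, u32 *data)
{
	/* A real driver would fetch random bytes from its hardware here;
	 * the constant below is only a placeholder for the sketch. */
	*data = 0x12345678;
	return sizeof(*data);	/* number of valid bytes placed in *data */
}

static struct hwrng example_rng_dev = {
	.name      = "example",
	.data_read = example_rng_data_read,
	.quality   = 990,	/* claimed entropy per 1024 bits of output */
};

static int __init example_rng_init(void)
{
	/* Quality 0 tells the hwrng core not to credit this source toward
	 * the entropy pool, which is the effect the new parameter selects. */
	if (!example_hwrng_seed)
		example_rng_dev.quality = 0;
	return hwrng_register(&example_rng_dev);
}

static void __exit example_rng_exit(void)
{
	hwrng_unregister(&example_rng_dev);
}

module_init(example_rng_init);
module_exit(example_rng_exit);
MODULE_LICENSE("GPL");

With quality forced to 0 the character device remains usable, but the core no longer treats its output as trustworthy seed material, which appears to be the opt-out the hwrng_seed parameter is meant to provide.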
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index 7125eb955ae5..8a9d4a0de129 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -31,7 +31,6 @@ #define DEBUG_PORTAL_ALLOC #define DEBUG_SUBSYSTEM S_LND -#include <asm/irq.h> #include <linux/crc32.h> #include <linux/errno.h> #include <linux/if.h> diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index ed040fbb7df8..b0c8e235b982 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1418,7 +1418,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->current_aid = conf->aid; - if (changed & BSS_CHANGED_BSSID) { + if (changed & BSS_CHANGED_BSSID && conf->bssid) { unsigned long flags; spin_lock_irqsave(&priv->lock, flags); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index f97323f19acf..af572d718135 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, priv->current_aid = conf->aid; - if (changed & BSS_CHANGED_BSSID) + if (changed & BSS_CHANGED_BSSID && conf->bssid) vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid); diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index e5b546f1152e..c3cc1a78d1e2 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -72,17 +72,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Backup Host regs */ - hr = hsotg->hr_backup; - if (!hr) { - hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL); - if (!hr) { - dev_err(hsotg->dev, "%s: can't allocate host regs\n", - __func__); - return -ENOMEM; - } - - hsotg->hr_backup = hr; - } + hr = &hsotg->hr_backup; hr->hcfg = readl(hsotg->regs + HCFG); hr->haintmsk = readl(hsotg->regs + HAINTMSK); for (i = 0; i < hsotg->core_params->host_channels; ++i) @@ -90,6 +80,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) hr->hprt0 = readl(hsotg->regs + HPRT0); hr->hfir = readl(hsotg->regs + HFIR); + hr->valid = true; return 0; } @@ -109,12 +100,13 @@ static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Restore host regs */ - hr = hsotg->hr_backup; - if (!hr) { + hr = &hsotg->hr_backup; + if (!hr->valid) { dev_err(hsotg->dev, "%s: no host registers to restore\n", __func__); return -EINVAL; } + hr->valid = false; writel(hr->hcfg, hsotg->regs + HCFG); writel(hr->haintmsk, hsotg->regs + HAINTMSK); @@ -152,17 +144,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Backup dev regs */ - dr = hsotg->dr_backup; - if (!dr) { - dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL); - if (!dr) { - dev_err(hsotg->dev, "%s: can't allocate device regs\n", - __func__); - return -ENOMEM; - } - - hsotg->dr_backup = dr; - } + dr = &hsotg->dr_backup; dr->dcfg = readl(hsotg->regs + DCFG); dr->dctl = readl(hsotg->regs + DCTL); @@ -195,7 +177,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i)); dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i)); } - + dr->valid = true; return 0; } @@ -215,12 +197,13 @@ static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Restore dev regs */ - dr = 
hsotg->dr_backup; - if (!dr) { + dr = &hsotg->dr_backup; + if (!dr->valid) { dev_err(hsotg->dev, "%s: no device registers to restore\n", __func__); return -EINVAL; } + dr->valid = false; writel(dr->dcfg, hsotg->regs + DCFG); writel(dr->dctl, hsotg->regs + DCTL); @@ -268,17 +251,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg) int i; /* Backup global regs */ - gr = hsotg->gr_backup; - if (!gr) { - gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL); - if (!gr) { - dev_err(hsotg->dev, "%s: can't allocate global regs\n", - __func__); - return -ENOMEM; - } - - hsotg->gr_backup = gr; - } + gr = &hsotg->gr_backup; gr->gotgctl = readl(hsotg->regs + GOTGCTL); gr->gintmsk = readl(hsotg->regs + GINTMSK); @@ -291,6 +264,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg) for (i = 0; i < MAX_EPS_CHANNELS; i++) gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i)); + gr->valid = true; return 0; } @@ -309,12 +283,13 @@ static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg) dev_dbg(hsotg->dev, "%s\n", __func__); /* Restore global regs */ - gr = hsotg->gr_backup; - if (!gr) { + gr = &hsotg->gr_backup; + if (!gr->valid) { dev_err(hsotg->dev, "%s: no global registers to restore\n", __func__); return -EINVAL; } + gr->valid = false; writel(0xffffffff, hsotg->regs + GINTSTS); writel(gr->gotgctl, hsotg->regs + GOTGCTL); diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 53b8de03f102..0ed87620941b 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -492,6 +492,7 @@ struct dwc2_gregs_backup { u32 gdfifocfg; u32 dtxfsiz[MAX_EPS_CHANNELS]; u32 gpwrdn; + bool valid; }; /** @@ -521,6 +522,7 @@ struct dwc2_dregs_backup { u32 doepctl[MAX_EPS_CHANNELS]; u32 doeptsiz[MAX_EPS_CHANNELS]; u32 doepdma[MAX_EPS_CHANNELS]; + bool valid; }; /** @@ -538,6 +540,7 @@ struct dwc2_hregs_backup { u32 hcintmsk[MAX_EPS_CHANNELS]; u32 hprt0; u32 hfir; + bool valid; }; /** @@ -705,9 +708,9 @@ struct dwc2_hsotg { struct work_struct wf_otg; struct timer_list wkp_timer; enum dwc2_lx_state lx_state; - struct dwc2_gregs_backup *gr_backup; - struct dwc2_dregs_backup *dr_backup; - struct dwc2_hregs_backup *hr_backup; + struct dwc2_gregs_backup gr_backup; + struct dwc2_dregs_backup dr_backup; + struct dwc2_hregs_backup hr_backup; struct dentry *debug_root; struct debugfs_regset32 *regset; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index b10377c65064..f845c41fe9e5 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -359,10 +359,9 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg) /* Caller must hold driver lock */ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, - struct dwc2_hcd_urb *urb, void **ep_handle, - gfp_t mem_flags) + struct dwc2_hcd_urb *urb, struct dwc2_qh *qh, + struct dwc2_qtd *qtd) { - struct dwc2_qtd *qtd; u32 intr_mask; int retval; int dev_speed; @@ -386,18 +385,15 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, return -ENODEV; } - qtd = kzalloc(sizeof(*qtd), mem_flags); if (!qtd) - return -ENOMEM; + return -EINVAL; dwc2_hcd_qtd_init(qtd, urb); - retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle, - mem_flags); + retval = dwc2_hcd_qtd_add(hsotg, qtd, qh); if (retval) { dev_err(hsotg->dev, "DWC OTG HCD URB Enqueue failed adding QTD. 
Error status %d\n", retval); - kfree(qtd); return retval; } @@ -2445,6 +2441,9 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, u32 tflags = 0; void *buf; unsigned long flags; + struct dwc2_qh *qh; + bool qh_allocated = false; + struct dwc2_qtd *qtd; if (dbg_urb(urb)) { dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); @@ -2523,15 +2522,32 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, urb->iso_frame_desc[i].length); urb->hcpriv = dwc2_urb; + qh = (struct dwc2_qh *) ep->hcpriv; + /* Create QH for the endpoint if it doesn't exist */ + if (!qh) { + qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags); + if (!qh) { + retval = -ENOMEM; + goto fail0; + } + ep->hcpriv = qh; + qh_allocated = true; + } + + qtd = kzalloc(sizeof(*qtd), mem_flags); + if (!qtd) { + retval = -ENOMEM; + goto fail1; + } spin_lock_irqsave(&hsotg->lock, flags); retval = usb_hcd_link_urb_to_ep(hcd, urb); if (retval) - goto fail1; + goto fail2; - retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags); + retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd); if (retval) - goto fail2; + goto fail3; if (alloc_bandwidth) { dwc2_allocate_bus_bandwidth(hcd, @@ -2543,12 +2559,25 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, return 0; -fail2: +fail3: dwc2_urb->priv = NULL; usb_hcd_unlink_urb_from_ep(hcd, urb); -fail1: +fail2: spin_unlock_irqrestore(&hsotg->lock, flags); urb->hcpriv = NULL; + kfree(qtd); +fail1: + if (qh_allocated) { + struct dwc2_qtd *qtd2, *qtd2_tmp; + + ep->hcpriv = NULL; + dwc2_hcd_qh_unlink(hsotg, qh); + /* Free each QTD in the QH's QTD list */ + list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list, + qtd_list_entry) + dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh); + dwc2_hcd_qh_free(hsotg, qh); + } fail0: kfree(dwc2_urb); diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h index 7b5841c40033..fc1054965552 100644 --- a/drivers/usb/dwc2/hcd.h +++ b/drivers/usb/dwc2/hcd.h @@ -463,6 +463,9 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, /* Schedule Queue Functions */ /* Implemented in hcd_queue.c */ extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg); +extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, + struct dwc2_hcd_urb *urb, + gfp_t mem_flags); extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); @@ -471,7 +474,7 @@ extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb); extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, - struct dwc2_qh **qh, gfp_t mem_flags); + struct dwc2_qh *qh); /* Unlinks and frees a QTD */ static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c index 9b5c36256627..3ad63d392e13 100644 --- a/drivers/usb/dwc2/hcd_queue.c +++ b/drivers/usb/dwc2/hcd_queue.c @@ -191,7 +191,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, * * Return: Pointer to the newly allocated QH, or NULL on error */ -static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, +struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, gfp_t mem_flags) { @@ -767,57 +767,32 @@ void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, 
struct dwc2_hcd_urb *urb) * * @hsotg: The DWC HCD structure * @qtd: The QTD to add - * @qh: Out parameter to return queue head - * @atomic_alloc: Flag to do atomic alloc if needed + * @qh: Queue head to add qtd to * * Return: 0 if successful, negative error code otherwise * - * Finds the correct QH to place the QTD into. If it does not find a QH, it - * will create a new QH. If the QH to which the QTD is added is not currently - * scheduled, it is placed into the proper schedule based on its EP type. + * If the QH to which the QTD is added is not currently scheduled, it is placed + * into the proper schedule based on its EP type. */ int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, - struct dwc2_qh **qh, gfp_t mem_flags) + struct dwc2_qh *qh) { - struct dwc2_hcd_urb *urb = qtd->urb; - int allocated = 0; int retval; - /* - * Get the QH which holds the QTD-list to insert to. Create QH if it - * doesn't exist. - */ - if (*qh == NULL) { - *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags); - if (*qh == NULL) - return -ENOMEM; - allocated = 1; + if (unlikely(!qh)) { + dev_err(hsotg->dev, "%s: Invalid QH\n", __func__); + retval = -EINVAL; + goto fail; } - retval = dwc2_hcd_qh_add(hsotg, *qh); + retval = dwc2_hcd_qh_add(hsotg, qh); if (retval) goto fail; - qtd->qh = *qh; - list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list); + qtd->qh = qh; + list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list); return 0; - fail: - if (allocated) { - struct dwc2_qtd *qtd2, *qtd2_tmp; - struct dwc2_qh *qh_tmp = *qh; - - *qh = NULL; - dwc2_hcd_qh_unlink(hsotg, qh_tmp); - - /* Free each QTD in the QH's QTD list */ - list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list, - qtd_list_entry) - dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp); - - dwc2_hcd_qh_free(hsotg, qh_tmp); - } - return retval; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 5c110d8e293b..ff5773c66b84 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -446,10 +446,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc) /* Select the HS PHY interface */ switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) { case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI: - if (!strncmp(dwc->hsphy_interface, "utmi", 4)) { + if (dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "utmi", 4)) { reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI; break; - } else if (!strncmp(dwc->hsphy_interface, "ulpi", 4)) { + } else if (dwc->hsphy_interface && + !strncmp(dwc->hsphy_interface, "ulpi", 4)) { reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI; dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); } else { diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 4e3447bbd097..58b4657fc721 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1758,10 +1758,13 @@ unknown: * take such requests too, if that's ever needed: to work * in config 0, etc. 
*/ - list_for_each_entry(f, &cdev->config->functions, list) - if (f->req_match && f->req_match(f, ctrl)) - goto try_fun_setup; - f = NULL; + if (cdev->config) { + list_for_each_entry(f, &cdev->config->functions, list) + if (f->req_match && f->req_match(f, ctrl)) + goto try_fun_setup; + f = NULL; + } + switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 0495c94a23d7..289e20119fea 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -571,7 +571,7 @@ static struct config_group *function_make( if (IS_ERR(fi)) return ERR_CAST(fi); - ret = config_item_set_name(&fi->group.cg_item, name); + ret = config_item_set_name(&fi->group.cg_item, "%s", name); if (ret) { usb_put_function_instance(fi); return ERR_PTR(ret); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 45b8c8b338df..6e7be91e6097 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) kiocb->private = p; - kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); + if (p->aio) + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); res = ffs_epfile_io(kiocb->ki_filp, p); if (res == -EIOCBQUEUED) @@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) kiocb->private = p; - kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); + if (p->aio) + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); res = ffs_epfile_io(kiocb->ki_filp, p); if (res == -EIOCBQUEUED) diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index d2259c663996..f936268d26c6 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns) return -EINVAL; } - curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL); + curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL); if (unlikely(!curlun)) return -ENOMEM; @@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns) common->luns = curlun; common->nluns = nluns; - pr_info("Number of LUNs=%d\n", common->nluns); - return 0; } EXPORT_SYMBOL_GPL(fsg_common_set_nluns); @@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi) struct fsg_opts *opts = fsg_opts_from_func_inst(fi); struct fsg_common *common = opts->common; struct fsg_dev *fsg; + unsigned nluns, i; fsg = kzalloc(sizeof(*fsg), GFP_KERNEL); if (unlikely(!fsg)) return ERR_PTR(-ENOMEM); mutex_lock(&opts->lock); + if (!opts->refcnt) { + for (nluns = i = 0; i < FSG_MAX_LUNS; ++i) + if (common->luns[i]) + nluns = i + 1; + if (!nluns) + pr_warn("No LUNS defined, continuing anyway\n"); + else + common->nluns = nluns; + pr_info("Number of LUNs=%u\n", common->nluns); + } opts->refcnt++; mutex_unlock(&opts->lock); + fsg->function.name = FSG_DRIVER_DESC; fsg->function.bind = fsg_bind; fsg->function.unbind = fsg_unbind; diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 6316aa5b1c49..ad50a67c1465 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -1145,7 +1145,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) if (opts->id && !midi->id) { status = -ENOMEM; 
mutex_unlock(&opts->lock); - goto kstrdup_fail; + goto setup_fail; } midi->in_ports = opts->in_ports; midi->out_ports = opts->out_ports; @@ -1164,8 +1164,6 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) return &midi->func; -kstrdup_fail: - f_midi_unregister_card(midi); setup_fail: for (--i; i >= 0; i--) kfree(midi->in_port[i]); diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index e547ea7f56b1..1137e3384218 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -1171,7 +1171,7 @@ static int fotg210_udc_probe(struct platform_device *pdev) udc_name, fotg210); if (ret < 0) { pr_err("request_irq error (%d)\n", ret); - goto err_irq; + goto err_req; } ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget); @@ -1183,7 +1183,6 @@ static int fotg210_udc_probe(struct platform_device *pdev) return 0; err_add_udc: -err_irq: free_irq(ires->start, fotg210); err_req: diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 30842bc195f5..92d5f718659b 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -275,9 +275,7 @@ static int musb_has_gadget(struct musb *musb) #ifdef CONFIG_USB_MUSB_HOST return 1; #else - if (musb->port_mode == MUSB_PORT_MODE_HOST) - return 1; - return musb->g.dev.driver != NULL; + return musb->port_mode == MUSB_PORT_MODE_HOST; #endif } diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 8f7cb068d29b..3fcc0483a081 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy) { unsigned int vbus_value; + if (!mxs_phy->regmap_anatop) + return false; + if (mxs_phy->port_id == 0) regmap_read(mxs_phy->regmap_anatop, ANADIG_USB1_VBUS_DET_STAT, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index ffd739e31bfc..eac7ccaa3c85 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ + { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 4f70df33975a..78b4f64c6b00 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -121,26 +121,26 @@ static DEFINE_SPINLOCK(release_lock); static const unsigned int dummy; /* for clarity in register access fns */ enum mos_regs { - THR, /* serial port regs */ - RHR, - IER, - FCR, - ISR, - LCR, - MCR, - LSR, - MSR, - SPR, - DLL, - DLM, - DPR, /* parallel port regs */ - DSR, - DCR, - ECR, - SP1_REG, /* device control regs */ - SP2_REG, /* serial port 2 (7720 only) */ - PP_REG, - SP_CONTROL_REG, + MOS7720_THR, /* serial port regs */ + MOS7720_RHR, + MOS7720_IER, + MOS7720_FCR, + MOS7720_ISR, + MOS7720_LCR, + MOS7720_MCR, + MOS7720_LSR, + MOS7720_MSR, + MOS7720_SPR, + MOS7720_DLL, + MOS7720_DLM, + MOS7720_DPR, /* parallel port regs */ + MOS7720_DSR, + MOS7720_DCR, + MOS7720_ECR, + MOS7720_SP1_REG, /* device control regs */ + 
MOS7720_SP2_REG, /* serial port 2 (7720 only) */ + MOS7720_PP_REG, + MOS7720_SP_CONTROL_REG, }; /* @@ -150,26 +150,26 @@ enum mos_regs { static inline __u16 get_reg_index(enum mos_regs reg) { static const __u16 mos7715_index_lookup_table[] = { - 0x00, /* THR */ - 0x00, /* RHR */ - 0x01, /* IER */ - 0x02, /* FCR */ - 0x02, /* ISR */ - 0x03, /* LCR */ - 0x04, /* MCR */ - 0x05, /* LSR */ - 0x06, /* MSR */ - 0x07, /* SPR */ - 0x00, /* DLL */ - 0x01, /* DLM */ - 0x00, /* DPR */ - 0x01, /* DSR */ - 0x02, /* DCR */ - 0x0a, /* ECR */ - 0x01, /* SP1_REG */ - 0x02, /* SP2_REG (7720 only) */ - 0x04, /* PP_REG (7715 only) */ - 0x08, /* SP_CONTROL_REG */ + 0x00, /* MOS7720_THR */ + 0x00, /* MOS7720_RHR */ + 0x01, /* MOS7720_IER */ + 0x02, /* MOS7720_FCR */ + 0x02, /* MOS7720_ISR */ + 0x03, /* MOS7720_LCR */ + 0x04, /* MOS7720_MCR */ + 0x05, /* MOS7720_LSR */ + 0x06, /* MOS7720_MSR */ + 0x07, /* MOS7720_SPR */ + 0x00, /* MOS7720_DLL */ + 0x01, /* MOS7720_DLM */ + 0x00, /* MOS7720_DPR */ + 0x01, /* MOS7720_DSR */ + 0x02, /* MOS7720_DCR */ + 0x0a, /* MOS7720_ECR */ + 0x01, /* MOS7720_SP1_REG */ + 0x02, /* MOS7720_SP2_REG (7720 only) */ + 0x04, /* MOS7720_PP_REG (7715 only) */ + 0x08, /* MOS7720_SP_CONTROL_REG */ }; return mos7715_index_lookup_table[reg]; } @@ -181,10 +181,10 @@ static inline __u16 get_reg_index(enum mos_regs reg) static inline __u16 get_reg_value(enum mos_regs reg, unsigned int serial_portnum) { - if (reg >= SP1_REG) /* control reg */ + if (reg >= MOS7720_SP1_REG) /* control reg */ return 0x0000; - else if (reg >= DPR) /* parallel port reg (7715 only) */ + else if (reg >= MOS7720_DPR) /* parallel port reg (7715 only) */ return 0x0100; else /* serial port reg */ @@ -252,7 +252,8 @@ static inline int mos7715_change_mode(struct mos7715_parport *mos_parport, enum mos7715_pp_modes mode) { mos_parport->shadowECR = mode; - write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR, + mos_parport->shadowECR); return 0; } @@ -486,7 +487,7 @@ static void parport_mos7715_write_data(struct parport *pp, unsigned char d) if (parport_prologue(pp) < 0) return; mos7715_change_mode(mos_parport, SPP); - write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, (__u8)d); parport_epilogue(pp); } @@ -497,7 +498,7 @@ static unsigned char parport_mos7715_read_data(struct parport *pp) if (parport_prologue(pp) < 0) return 0; - read_mos_reg(mos_parport->serial, dummy, DPR, &d); + read_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, &d); parport_epilogue(pp); return d; } @@ -510,7 +511,7 @@ static void parport_mos7715_write_control(struct parport *pp, unsigned char d) if (parport_prologue(pp) < 0) return; data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0); - write_mos_reg(mos_parport->serial, dummy, DCR, data); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, data); mos_parport->shadowDCR = data; parport_epilogue(pp); } @@ -543,7 +544,8 @@ static unsigned char parport_mos7715_frob_control(struct parport *pp, if (parport_prologue(pp) < 0) return 0; mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); dcr = mos_parport->shadowDCR & 0x0f; parport_epilogue(pp); return dcr; @@ -581,7 +583,8 @@ static void parport_mos7715_data_forward(struct parport *pp) return; mos7715_change_mode(mos_parport, PS2); 
mos_parport->shadowDCR &= ~0x20; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); parport_epilogue(pp); } @@ -593,7 +596,8 @@ static void parport_mos7715_data_reverse(struct parport *pp) return; mos7715_change_mode(mos_parport, PS2); mos_parport->shadowDCR |= 0x20; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); parport_epilogue(pp); } @@ -633,8 +637,10 @@ static void parport_mos7715_restore_state(struct parport *pp, spin_unlock(&release_lock); return; } - write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR); - write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR); + write_parport_reg_nonblock(mos_parport, MOS7720_DCR, + mos_parport->shadowDCR); + write_parport_reg_nonblock(mos_parport, MOS7720_ECR, + mos_parport->shadowECR); spin_unlock(&release_lock); } @@ -714,14 +720,16 @@ static int mos7715_parport_init(struct usb_serial *serial) init_completion(&mos_parport->syncmsg_compl); /* cycle parallel port reset bit */ - write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80); - write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00); + write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x80); + write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x00); /* initialize device registers */ mos_parport->shadowDCR = DCR_INIT_VAL; - write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, + mos_parport->shadowDCR); mos_parport->shadowECR = ECR_INIT_VAL; - write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); + write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR, + mos_parport->shadowECR); /* register with parport core */ mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE, @@ -1033,45 +1041,49 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port) /* Initialize MCS7720 -- Write Init values to corresponding Registers * * Register Index - * 0 : THR/RHR - * 1 : IER - * 2 : FCR - * 3 : LCR - * 4 : MCR - * 5 : LSR - * 6 : MSR - * 7 : SPR + * 0 : MOS7720_THR/MOS7720_RHR + * 1 : MOS7720_IER + * 2 : MOS7720_FCR + * 3 : MOS7720_LCR + * 4 : MOS7720_MCR + * 5 : MOS7720_LSR + * 6 : MOS7720_MSR + * 7 : MOS7720_SPR * * 0x08 : SP1/2 Control Reg */ port_number = port->port_number; - read_mos_reg(serial, port_number, LSR, &data); + read_mos_reg(serial, port_number, MOS7720_LSR, &data); dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data); - write_mos_reg(serial, dummy, SP1_REG, 0x02); - write_mos_reg(serial, dummy, SP2_REG, 0x02); + write_mos_reg(serial, dummy, MOS7720_SP1_REG, 0x02); + write_mos_reg(serial, dummy, MOS7720_SP2_REG, 0x02); - write_mos_reg(serial, port_number, IER, 0x00); - write_mos_reg(serial, port_number, FCR, 0x00); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0x00); - write_mos_reg(serial, port_number, FCR, 0xcf); + write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf); mos7720_port->shadowLCR = 0x03; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); mos7720_port->shadowMCR = 0x0b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); - 
write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00); - read_mos_reg(serial, dummy, SP_CONTROL_REG, &data); + write_mos_reg(serial, port_number, MOS7720_SP_CONTROL_REG, 0x00); + read_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, &data); data = data | (port->port_number + 1); - write_mos_reg(serial, dummy, SP_CONTROL_REG, data); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, data); mos7720_port->shadowLCR = 0x83; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); - write_mos_reg(serial, port_number, THR, 0x0c); - write_mos_reg(serial, port_number, IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_THR, 0x0c); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); mos7720_port->shadowLCR = 0x03; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); - write_mos_reg(serial, port_number, IER, 0x0c); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_IER, 0x0c); response = usb_submit_urb(port->read_urb, GFP_KERNEL); if (response) @@ -1144,8 +1156,8 @@ static void mos7720_close(struct usb_serial_port *port) usb_kill_urb(port->write_urb); usb_kill_urb(port->read_urb); - write_mos_reg(serial, port->port_number, MCR, 0x00); - write_mos_reg(serial, port->port_number, IER, 0x00); + write_mos_reg(serial, port->port_number, MOS7720_MCR, 0x00); + write_mos_reg(serial, port->port_number, MOS7720_IER, 0x00); mos7720_port->open = 0; } @@ -1169,7 +1181,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state) data = mos7720_port->shadowLCR & ~UART_LCR_SBC; mos7720_port->shadowLCR = data; - write_mos_reg(serial, port->port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port->port_number, MOS7720_LCR, + mos7720_port->shadowLCR); } /* @@ -1297,7 +1310,7 @@ static void mos7720_throttle(struct tty_struct *tty) /* if we are implementing RTS/CTS, toggle that line */ if (tty->termios.c_cflag & CRTSCTS) { mos7720_port->shadowMCR &= ~UART_MCR_RTS; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); } } @@ -1327,7 +1340,7 @@ static void mos7720_unthrottle(struct tty_struct *tty) /* if we are implementing RTS/CTS, toggle that line */ if (tty->termios.c_cflag & CRTSCTS) { mos7720_port->shadowMCR |= UART_MCR_RTS; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); } } @@ -1352,35 +1365,39 @@ static int set_higher_rates(struct moschip_port *mos7720_port, dev_dbg(&port->dev, "Sending Setting Commands ..........\n"); port_number = port->port_number; - write_mos_reg(serial, port_number, IER, 0x00); - write_mos_reg(serial, port_number, FCR, 0x00); - write_mos_reg(serial, port_number, FCR, 0xcf); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf); mos7720_port->shadowMCR = 0x0b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x00); /*********************************************** * Set for higher rates * ***********************************************/ /* writing baud rate verbatum into uart clock field 
clearly not right */ if (port_number == 0) - sp_reg = SP1_REG; + sp_reg = MOS7720_SP1_REG; else - sp_reg = SP2_REG; + sp_reg = MOS7720_SP2_REG; write_mos_reg(serial, dummy, sp_reg, baud * 0x10); - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x03); mos7720_port->shadowMCR = 0x2b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); /*********************************************** * Set DLL/DLM ***********************************************/ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); - write_mos_reg(serial, port_number, DLL, 0x01); - write_mos_reg(serial, port_number, DLM, 0x00); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_DLL, 0x01); + write_mos_reg(serial, port_number, MOS7720_DLM, 0x00); mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); return 0; } @@ -1488,15 +1505,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port, /* Enable access to divisor latch */ mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; - write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR); /* Write the divisor */ - write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff)); - write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8)); + write_mos_reg(serial, number, MOS7720_DLL, (__u8)(divisor & 0xff)); + write_mos_reg(serial, number, MOS7720_DLM, + (__u8)((divisor & 0xff00) >> 8)); /* Disable access to divisor latch */ mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; - write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR); return status; } @@ -1600,14 +1618,16 @@ static void change_port_settings(struct tty_struct *tty, /* Disable Interrupts */ - write_mos_reg(serial, port_number, IER, 0x00); - write_mos_reg(serial, port_number, FCR, 0x00); - write_mos_reg(serial, port_number, FCR, 0xcf); + write_mos_reg(serial, port_number, MOS7720_IER, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0x00); + write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf); /* Send the updated LCR value to the mos7720 */ - write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); + write_mos_reg(serial, port_number, MOS7720_LCR, + mos7720_port->shadowLCR); mos7720_port->shadowMCR = 0x0b; - write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); /* set up the MCR register and send it to the mos7720 */ mos7720_port->shadowMCR = UART_MCR_OUT2; @@ -1619,14 +1639,17 @@ static void change_port_settings(struct tty_struct *tty, /* To set hardware flow control to the specified * * serial port, in SP1/2_CONTROL_REG */ if (port_number) - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, + 0x01); else - write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02); + write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, + 0x02); } else mos7720_port->shadowMCR &= ~(UART_MCR_XONANY); - write_mos_reg(serial, 
port_number, MCR, mos7720_port->shadowMCR); + write_mos_reg(serial, port_number, MOS7720_MCR, + mos7720_port->shadowMCR); /* Determine divisor based on baud rate */ baud = tty_get_baud_rate(tty); @@ -1639,7 +1662,7 @@ static void change_port_settings(struct tty_struct *tty, if (baud >= 230400) { set_higher_rates(mos7720_port, baud); /* Enable Interrupts */ - write_mos_reg(serial, port_number, IER, 0x0c); + write_mos_reg(serial, port_number, MOS7720_IER, 0x0c); return; } @@ -1650,7 +1673,7 @@ static void change_port_settings(struct tty_struct *tty, if (cflag & CBAUD) tty_encode_baud_rate(tty, baud, baud); /* Enable Interrupts */ - write_mos_reg(serial, port_number, IER, 0x0c); + write_mos_reg(serial, port_number, MOS7720_IER, 0x0c); if (port->read_urb->status != -EINPROGRESS) { status = usb_submit_urb(port->read_urb, GFP_KERNEL); @@ -1725,7 +1748,7 @@ static int get_lsr_info(struct tty_struct *tty, count = mos7720_chars_in_buffer(tty); if (count == 0) { - read_mos_reg(port->serial, port_number, LSR, &data); + read_mos_reg(port->serial, port_number, MOS7720_LSR, &data); if ((data & (UART_LSR_TEMT | UART_LSR_THRE)) == (UART_LSR_TEMT | UART_LSR_THRE)) { dev_dbg(&port->dev, "%s -- Empty\n", __func__); @@ -1782,7 +1805,7 @@ static int mos7720_tiocmset(struct tty_struct *tty, mcr &= ~UART_MCR_LOOP; mos7720_port->shadowMCR = mcr; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); return 0; @@ -1827,7 +1850,7 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd, } mos7720_port->shadowMCR = mcr; - write_mos_reg(port->serial, port->port_number, MCR, + write_mos_reg(port->serial, port->port_number, MOS7720_MCR, mos7720_port->shadowMCR); return 0; @@ -1942,7 +1965,7 @@ static int mos7720_startup(struct usb_serial *serial) } #endif /* LSR For Port 1 */ - read_mos_reg(serial, 0, LSR, &data); + read_mos_reg(serial, 0, MOS7720_LSR, &data); dev_dbg(&dev->dev, "LSR:%x\n", data); return 0; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index f0c0c53359ad..19b85ee98a72 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1765,6 +1765,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, { } /* Terminating entry */ diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 529066bbc7e8..46f1f13b41f1 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void) tty_unregister_driver(usb_serial_tty_driver); put_tty_driver(usb_serial_tty_driver); bus_unregister(&usb_serial_bus_type); + idr_destroy(&serial_minors); } diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index c1b03f4235b9..4e7fec36f5c3 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -4,7 +4,7 @@ * Watchdog driver for ARM SP805 watchdog module * * Copyright (C) 2010 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> + * Viresh Kumar <vireshk@kernel.org> * * This file is 
licensed under the terms of the GNU General Public * License version 2 or later. This program is licensed "as is" without any @@ -303,6 +303,6 @@ static struct amba_driver sp805_wdt_driver = { module_amba_driver(sp805_wdt_driver); -MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); +MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); MODULE_LICENSE("GPL");
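
The configfs hunk above passes the caller-supplied name through a literal "%s" format to config_item_set_name() instead of handing it over as the format string itself. A minimal, standalone C sketch of that pattern is below (plain userspace C, not the kernel API; set_name() is a hypothetical stand-in for any printf-style setter):

#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a printf-style name setter. */
static void set_name(char *dst, size_t size, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(dst, size, fmt, ap);
	va_end(ap);
}

int main(void)
{
	char buf[32];
	const char *user_name = "func.%x%x";	/* caller-controlled input */

	/* Safe: the name is only ever an argument to a fixed "%s" format. */
	set_name(buf, sizeof(buf), "%s", user_name);

	/*
	 * Unsafe variant (do not do this): the '%x' sequences would be
	 * parsed as conversion specifiers and read unrelated arguments:
	 * set_name(buf, sizeof(buf), user_name);
	 */
	printf("%s\n", buf);
	return 0;
}

With the fixed "%s", any '%' characters in the supplied name are copied verbatim rather than interpreted, which is the point of the one-line change in function_make().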