author      Joerg Roedel <jroedel@suse.de>    2015-01-26 13:42:49 +0100
committer   Joerg Roedel <jroedel@suse.de>    2015-01-26 13:42:49 +0100
commit      fd47b693c3c694da8210b5a0c01b1e379f4d9865 (patch)
tree        84d8c5596df7f6eb295ac380b8ddb983af6c906d /drivers
parent      89aa57d15f1833323b001c3764141311ac6179a2 (diff)
parent      16753322983bcca0eca6d81f20d23277df0d6cf7 (diff)
download    linux-fd47b693c3c694da8210b5a0c01b1e379f4d9865.tar.gz
            linux-fd47b693c3c694da8210b5a0c01b1e379f4d9865.tar.bz2
            linux-fd47b693c3c694da8210b5a0c01b1e379f4d9865.zip
Merge branch 'arm/smmu' into arm/renesas
Diffstat (limited to 'drivers')
124 files changed, 3105 insertions, 2350 deletions
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 5277a0ee5704..b1def411c0b8 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -512,7 +512,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); if (gsi >= 0) { acpi_unregister_gsi(gsi); - dev->irq = 0; dev->irq_managed = 0; } } diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index a3a13605a9c4..5f601553b9b0 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -835,6 +835,7 @@ config PATA_AT32 config PATA_AT91 tristate "PATA support for AT91SAM9260" depends on ARM && SOC_AT91SAM9 + depends on !ARCH_MULTIPLATFORM help This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 49f1e6890587..33bb06e006c9 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -325,7 +325,6 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ - { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index feeb8f1e2fe8..cbcd20810355 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -125,10 +125,11 @@ static int xgene_ahci_restart_engine(struct ata_port *ap) * xgene_ahci_qc_issue - Issue commands to the device * @qc: Command to issue * - * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot - * clear the BSY bit after receiving the PIO setup FIS. This results in the dma - * state machine goes into the CMFatalErrorUpdate state and locks up. By - * restarting the dma engine, it removes the controller out of lock up state. + * Due to Hardware errata for IDENTIFY DEVICE command and PACKET + * command of ATAPI protocol set, the controller cannot clear the BSY bit + * after receiving the PIO setup FIS. This results in the DMA state machine + * going into the CMFatalErrorUpdate state and locks up. By restarting the + * DMA engine, it removes the controller out of lock up state. 
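[Editor's note] The ahci.c hunk above drops a duplicated PCI ID: 0xa103 was listed twice (once commented AHCI, once RAID), and a pci_device_id table is matched first-hit, so the second row could never be selected. A minimal sketch of that behaviour, with an illustrative table rather than the driver's real one:

#include <linux/pci.h>

/* pci_match_id() returns the first matching row, so a repeated
 * vendor/device pair makes every later copy dead code. */
static const struct pci_device_id demo_ids[] = {
	{ PCI_VDEVICE(INTEL, 0xa103), 0 },	/* this row matches */
	{ PCI_VDEVICE(INTEL, 0xa103), 1 },	/* shadowed, never used */
	{ }					/* sentinel */
};
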
*/ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) { @@ -137,7 +138,8 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) struct xgene_ahci_context *ctx = hpriv->plat_data; int rc = 0; - if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA)) + if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) || + (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET))) xgene_ahci_restart_engine(ap); rc = ahci_qc_issue(qc); @@ -188,7 +190,7 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev, * * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP */ - id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8); + id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8)); return 0; } diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 97683e45ab04..61a9c07e0dff 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -2003,7 +2003,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) devslp = readl(port_mmio + PORT_DEVSLP); if (!(devslp & PORT_DEVSLP_DSP)) { - dev_err(ap->host->dev, "port does not support device sleep\n"); + dev_info(ap->host->dev, "port does not support device sleep\n"); return; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 5c84fb5c3372..d1a05f9bb91f 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4233,10 +4233,33 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, /* devices that don't properly handle queued TRIM commands */ - { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, - { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, - { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, - { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + + /* + * As defined, the DRAT (Deterministic Read After Trim) and RZAT + * (Return Zero After Trim) flags in the ATA Command Set are + * unreliable in the sense that they only define what happens if + * the device successfully executed the DSM TRIM command. TRIM + * is only advisory, however, and the device is free to silently + * ignore all or parts of the request. + * + * Whitelist drives that are known to reliably return zeroes + * after TRIM. + */ + + /* + * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude + * that model before whitelisting all other intel SSDs. + */ + { "INTEL*SSDSC2MH*", NULL, 0, }, + + { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, /* * Some WD SATA-I drives spin up and down erratically when the link @@ -4748,7 +4771,10 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) return NULL; for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { - tag = tag < max_queue ? tag : 0; + if (ap->flags & ATA_FLAG_LOWTAG) + tag = i; + else + tag = tag < max_queue ? tag : 0; /* the last tag is reserved for internal command. 
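[Editor's note] The ata_qc_new() hunk above adds ATA_FLAG_LOWTAG: instead of continuing round-robin from last_tag, flagged hosts always get the lowest free tag (sata_sil24 sets the flag later in this diff). A standalone sketch of the two search orders over a tag bitmap; the helper and its parameters are hypothetical:

#include <linux/bitops.h>

/* Illustrative only: lowest-free vs. round-robin tag selection. */
static int demo_find_tag(unsigned long busy, unsigned int max_queue,
			 unsigned int last_tag, bool lowtag)
{
	unsigned int i, tag;

	for (i = 0, tag = last_tag + 1; i < max_queue; i++, tag++) {
		if (lowtag)
			tag = i;			 /* scan 0,1,2,... */
		else
			tag = tag < max_queue ? tag : 0; /* wrap around */
		if (!test_bit(tag, &busy))
			return tag;
	}
	return -1;	/* queue full */
}
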
*/ if (tag == ATA_TAG_INTERNAL) diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 3dbec8954c86..8d00c2638bed 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2389,6 +2389,7 @@ const char *ata_get_cmd_descript(u8 command) return NULL; } +EXPORT_SYMBOL_GPL(ata_get_cmd_descript); /** * ata_eh_link_report - report error handling to user diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e364e86e84d7..6abd17a85b13 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2532,13 +2532,15 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) rbuf[15] = lowest_aligned; if (ata_id_has_trim(args->id)) { - rbuf[14] |= 0x80; /* TPE */ + rbuf[14] |= 0x80; /* LBPME */ - if (ata_id_has_zero_after_trim(args->id)) - rbuf[14] |= 0x40; /* TPRZ */ + if (ata_id_has_zero_after_trim(args->id) && + dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) { + ata_dev_info(dev, "Enabling discard_zeroes_data\n"); + rbuf[14] |= 0x40; /* LBPRZ */ + } } } - return 0; } diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index db90aa35cb71..2e86e3b85266 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap) DPRINTK("ENTER\n"); cancel_delayed_work_sync(&ap->sff_pio_task); + + /* + * We wanna reset the HSM state to IDLE. If we do so without + * grabbing the port lock, critical sections protected by it which + * expect the HSM state to stay stable may get surprised. For + * example, we may set IDLE in between the time + * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls + * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG(). + */ + spin_lock_irq(ap->lock); ap->hsm_task_state = HSM_ST_IDLE; + spin_unlock_irq(ap->lock); + ap->sff_pio_task_link = NULL; if (ata_msg_ctl(ap)) diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index c7ddef89e7b0..8e8248179d20 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) if (err) { dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" " %d\n", __func__, err); - goto error_out; + return err; } /* Enabe DMA */ @@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) sata_dma_regs); return 0; - -error_out: - dma_dwc_exit(hsdev); - - return err; } static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) @@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) char *ver = (char *)&versionr; u8 *base = NULL; int err = 0; - int irq, rc; + int irq; struct ata_host *host; struct ata_port_info pi = sata_dwc_port_info[0]; const struct ata_port_info *ppi[] = { &pi, NULL }; @@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) if (irq == NO_IRQ) { dev_err(&ofdev->dev, "no SATA DMA irq\n"); err = -ENODEV; - goto error_out; + goto error_iomap; } /* Get physical SATA DMA register base address */ @@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev) dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" " address\n"); err = -ENODEV; - goto error_out; + goto error_iomap; } /* Save dev for later use in dev_xxx() routines */ host_pvt.dwc_dev = &ofdev->dev; /* Initialize AHB DMAC */ - dma_dwc_init(hsdev, irq); + err = dma_dwc_init(hsdev, irq); + if (err) + goto error_dma_iomap; /* Enable SATA Interrupts */ 
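[Editor's note] The sata_dwc_460ex.c changes above are a textbook probe() unwind ladder: every acquired resource gets a label, and each failure jumps to the label that releases exactly what was taken so far, in reverse order (here adding error_dma_iomap so a failed dma_dwc_init() no longer leaks the DMA register mapping). A generic sketch of the pattern, with hypothetical helper names:

static int demo_probe(struct platform_device *ofdev)
{
	void __iomem *base, *dma_regs;
	int err;

	base = of_iomap(ofdev->dev.of_node, 0);
	if (!base)
		return -ENODEV;

	dma_regs = of_iomap(ofdev->dev.of_node, 1);
	if (!dma_regs) {
		err = -ENODEV;
		goto error_iomap;
	}

	err = demo_dma_init(ofdev);		/* hypothetical helper */
	if (err)
		goto error_dma_iomap;

	return 0;

error_dma_iomap:
	iounmap(dma_regs);
error_iomap:
	iounmap(base);
	return err;
}
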
sata_dwc_enable_interrupts(hsdev); @@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev) * device discovery process, invoking our port_start() handler & * error_handler() to execute a dummy Softreset EH session */ - rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); - - if (rc != 0) + err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); + if (err) dev_err(&ofdev->dev, "failed to activate host"); dev_set_drvdata(&ofdev->dev, host); @@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev) error_out: /* Free SATA DMA resources */ dma_dwc_exit(hsdev); - +error_dma_iomap: + iounmap((void __iomem *)host_pvt.sata_dma_regs); error_iomap: iounmap(base); error_kmalloc: @@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev) /* Free SATA DMA resources */ dma_dwc_exit(hsdev); + iounmap((void __iomem *)host_pvt.sata_dma_regs); iounmap(hsdev->reg_base); kfree(hsdev); kfree(host); diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index d81b20ddb527..ea655949023f 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -246,7 +246,7 @@ enum { /* host flags */ SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | - ATA_FLAG_AN | ATA_FLAG_PMP, + ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG, SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ IRQ_STAT_4PORTS = 0xf, diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index cb529e9a82dd..d826bf3e62c8 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -106,7 +106,7 @@ struct nvme_queue { dma_addr_t cq_dma_addr; u32 __iomem *q_db; u16 q_depth; - u16 cq_vector; + s16 cq_vector; u16 sq_head; u16 sq_tail; u16 cq_head; diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index eb7682dc123b..81bf297f1034 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus, } /* Checks whether the given window number is available */ + +/* On Armada XP, 375 and 38x the MBus window 13 has the remap + * capability, like windows 0 to 7. However, the mvebu-mbus driver + * isn't currently taking into account this special case, which means + * that when window 13 is actually used, the remap registers are left + * to 0, making the device using this MBus window unavailable. The + * quick fix for stable is to not use window 13. A follow up patch + * will correctly handle this window. 
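[Editor's note] The nvme-core.c one-liner above widens cq_vector from u16 to s16; as far as the surrounding code suggests, the driver wants -1 as a "no IRQ vector assigned" sentinel, which an unsigned field would silently wrap to 65535. A tiny illustration:

/* Illustrative: a signed vector field lets -1 mean "unassigned". */
struct demo_queue {
	s16 cq_vector;			/* -1 until a vector is assigned */
};

static bool demo_queue_has_vector(const struct demo_queue *q)
{
	return q->cq_vector >= 0;	/* false while cq_vector == -1 */
}
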
+*/ static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus, const int win) { void __iomem *addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win); u32 ctrl = readl(addr + WIN_CTRL_OFF); + + if (win == 13) + return false; + return !(ctrl & WIN_CTRL_ENABLE); } diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 0595dc6c453e..f1e33d08dd83 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c @@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base) } static void -kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) +kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw) { - void __iomem *base = IOMEM(timer_base); int loop_limit = 4; /* @@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) */ while (--loop_limit) { - *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET); - *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET); - if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET)) + *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET); + *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET); + if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET)) break; } if (!loop_limit) { diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 9403061a2acc..83564c9cfdbe 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset) writel_relaxed(value, reg_base + offset); if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { - stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; - switch (offset & EXYNOS4_MCT_L_MASK) { + stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; + switch (offset & ~EXYNOS4_MCT_L_MASK) { case MCT_L_TCON_OFFSET: mask = 1 << 3; /* L_TCON write status */ break; diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 0f665b8f2461..f150ca82bfaf 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch, ced->features = CLOCK_EVT_FEAT_PERIODIC; ced->features |= CLOCK_EVT_FEAT_ONESHOT; ced->rating = 200; - ced->cpumask = cpumask_of(0); + ced->cpumask = cpu_possible_mask; ced->set_next_event = sh_tmu_clock_event_next; ced->set_mode = sh_tmu_clock_event_mode; ced->suspend = sh_tmu_clock_event_suspend; diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c index 55d4803d71b0..3d9e08f7e823 100644 --- a/drivers/gpio/gpio-crystalcove.c +++ b/drivers/gpio/gpio-crystalcove.c @@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) { if (pending & BIT(gpio)) { virq = irq_find_mapping(cg->chip.irqdomain, gpio); - generic_handle_irq(virq); + handle_nested_irq(virq); } } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 604dbe60bdee..08261f2b3a82 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) return false; ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); - if (ret < 0) - return false; + if (ret < 0) { + /* We've found the gpio chip, but the translation failed. 
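[Editor's note] The gpiolib-of.c fix above needs to tell apart "this chip doesn't match, keep iterating" from "this chip matched but translation failed, stop and report", and does so by parking an ERR_PTR in the out_gpio slot. A minimal sketch of that convention; the helper is illustrative, and callers test the result with IS_ERR()/PTR_ERR():

#include <linux/err.h>

/* One pointer return carries either a valid descriptor or an errno. */
static struct gpio_desc *demo_translate(struct gpio_chip *gc, int ret)
{
	if (ret < 0)
		return ERR_PTR(ret);		/* found chip, bad xlate */
	return gpiochip_get_desc(gc, ret);	/* success */
}
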
+ * Return true to stop looking and return the translation + * error via out_gpio + */ + gg_data->out_gpio = ERR_PTR(ret); + return true; + } gg_data->out_gpio = gpiochip_get_desc(gc, ret); return true; diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 2ac1800b58bb..f62aa115d79a 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev, return status; } -static const DEVICE_ATTR(value, 0644, +static DEVICE_ATTR(value, 0644, gpio_value_show, gpio_value_store); static irqreturn_t gpio_sysfs_irq(int irq, void *priv) @@ -353,17 +353,46 @@ static ssize_t gpio_active_low_store(struct device *dev, return status ? : size; } -static const DEVICE_ATTR(active_low, 0644, +static DEVICE_ATTR(active_low, 0644, gpio_active_low_show, gpio_active_low_store); -static const struct attribute *gpio_attrs[] = { +static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr, + int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct gpio_desc *desc = dev_get_drvdata(dev); + umode_t mode = attr->mode; + bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags); + + if (attr == &dev_attr_direction.attr) { + if (!show_direction) + mode = 0; + } else if (attr == &dev_attr_edge.attr) { + if (gpiod_to_irq(desc) < 0) + mode = 0; + if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags)) + mode = 0; + } + + return mode; +} + +static struct attribute *gpio_attrs[] = { + &dev_attr_direction.attr, + &dev_attr_edge.attr, &dev_attr_value.attr, &dev_attr_active_low.attr, NULL, }; -static const struct attribute_group gpio_attr_group = { - .attrs = (struct attribute **) gpio_attrs, +static const struct attribute_group gpio_group = { + .attrs = gpio_attrs, + .is_visible = gpio_is_visible, +}; + +static const struct attribute_group *gpio_groups[] = { + &gpio_group, + NULL }; /* @@ -400,16 +429,13 @@ static ssize_t chip_ngpio_show(struct device *dev, } static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); -static const struct attribute *gpiochip_attrs[] = { +static struct attribute *gpiochip_attrs[] = { &dev_attr_base.attr, &dev_attr_label.attr, &dev_attr_ngpio.attr, NULL, }; - -static const struct attribute_group gpiochip_attr_group = { - .attrs = (struct attribute **) gpiochip_attrs, -}; +ATTRIBUTE_GROUPS(gpiochip); /* * /sys/class/gpio/export ... write-only @@ -556,45 +582,30 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) goto fail_unlock; } - if (!desc->chip->direction_input || !desc->chip->direction_output) - direction_may_change = false; + if (desc->chip->direction_input && desc->chip->direction_output && + direction_may_change) { + set_bit(FLAG_SYSFS_DIR, &desc->flags); + } + spin_unlock_irqrestore(&gpio_lock, flags); offset = gpio_chip_hwgpio(desc); if (desc->chip->names && desc->chip->names[offset]) ioname = desc->chip->names[offset]; - dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), - desc, ioname ? ioname : "gpio%u", - desc_to_gpio(desc)); + dev = device_create_with_groups(&gpio_class, desc->chip->dev, + MKDEV(0, 0), desc, gpio_groups, + ioname ? 
ioname : "gpio%u", + desc_to_gpio(desc)); if (IS_ERR(dev)) { status = PTR_ERR(dev); goto fail_unlock; } - status = sysfs_create_group(&dev->kobj, &gpio_attr_group); - if (status) - goto fail_unregister_device; - - if (direction_may_change) { - status = device_create_file(dev, &dev_attr_direction); - if (status) - goto fail_unregister_device; - } - - if (gpiod_to_irq(desc) >= 0 && (direction_may_change || - !test_bit(FLAG_IS_OUT, &desc->flags))) { - status = device_create_file(dev, &dev_attr_edge); - if (status) - goto fail_unregister_device; - } - set_bit(FLAG_EXPORT, &desc->flags); mutex_unlock(&sysfs_lock); return 0; -fail_unregister_device: - device_unregister(dev); fail_unlock: mutex_unlock(&sysfs_lock); gpiod_dbg(desc, "%s: status %d\n", __func__, status); @@ -718,6 +729,7 @@ void gpiod_unexport(struct gpio_desc *desc) dev = class_find_device(&gpio_class, NULL, desc, match_export); if (dev) { gpio_setup_irq(desc, dev, 0); + clear_bit(FLAG_SYSFS_DIR, &desc->flags); clear_bit(FLAG_EXPORT, &desc->flags); } else status = -ENODEV; @@ -750,13 +762,13 @@ int gpiochip_export(struct gpio_chip *chip) /* use chip->base for the ID; it's already known to be unique */ mutex_lock(&sysfs_lock); - dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, - "gpiochip%d", chip->base); - if (!IS_ERR(dev)) { - status = sysfs_create_group(&dev->kobj, - &gpiochip_attr_group); - } else + dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0), + chip, gpiochip_groups, + "gpiochip%d", chip->base); + if (IS_ERR(dev)) status = PTR_ERR(dev); + else + status = 0; chip->exported = (status == 0); mutex_unlock(&sysfs_lock); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 487afe6f22fc..568aa2b6bdb0 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -248,29 +248,30 @@ int gpiochip_add(struct gpio_chip *chip) base = gpiochip_find_base(chip->ngpio); if (base < 0) { status = base; - goto unlock; + spin_unlock_irqrestore(&gpio_lock, flags); + goto err_free_descs; } chip->base = base; } status = gpiochip_add_to_list(chip); + if (status) { + spin_unlock_irqrestore(&gpio_lock, flags); + goto err_free_descs; + } - if (status == 0) { - for (id = 0; id < chip->ngpio; id++) { - struct gpio_desc *desc = &descs[id]; - desc->chip = chip; - - /* REVISIT: most hardware initializes GPIOs as - * inputs (often with pullups enabled) so power - * usage is minimized. Linux code should set the - * gpio direction first thing; but until it does, - * and in case chip->get_direction is not set, - * we may expose the wrong direction in sysfs. - */ - desc->flags = !chip->direction_input - ? (1 << FLAG_IS_OUT) - : 0; - } + for (id = 0; id < chip->ngpio; id++) { + struct gpio_desc *desc = &descs[id]; + + desc->chip = chip; + + /* REVISIT: most hardware initializes GPIOs as inputs (often + * with pullups enabled) so power usage is minimized. Linux + * code should set the gpio direction first thing; but until + * it does, and in case chip->get_direction is not set, we may + * expose the wrong direction in sysfs. + */ + desc->flags = !chip->direction_input ? 
(1 << FLAG_IS_OUT) : 0; } chip->desc = descs; @@ -284,12 +285,9 @@ int gpiochip_add(struct gpio_chip *chip) of_gpiochip_add(chip); acpi_gpiochip_add(chip); - if (status) - goto fail; - status = gpiochip_export(chip); if (status) - goto fail; + goto err_remove_chip; pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__, chip->base, chip->base + chip->ngpio - 1, @@ -297,11 +295,15 @@ int gpiochip_add(struct gpio_chip *chip) return 0; -unlock: +err_remove_chip: + acpi_gpiochip_remove(chip); + of_gpiochip_remove(chip); + spin_lock_irqsave(&gpio_lock, flags); + list_del(&chip->list); spin_unlock_irqrestore(&gpio_lock, flags); -fail: - kfree(descs); chip->desc = NULL; +err_free_descs: + kfree(descs); /* failures here can mean systems won't boot... */ pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__, @@ -325,14 +327,15 @@ void gpiochip_remove(struct gpio_chip *chip) unsigned long flags; unsigned id; - acpi_gpiochip_remove(chip); - - spin_lock_irqsave(&gpio_lock, flags); + gpiochip_unexport(chip); gpiochip_irqchip_remove(chip); + + acpi_gpiochip_remove(chip); gpiochip_remove_pin_ranges(chip); of_gpiochip_remove(chip); + spin_lock_irqsave(&gpio_lock, flags); for (id = 0; id < chip->ngpio; id++) { if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n"); @@ -342,7 +345,6 @@ void gpiochip_remove(struct gpio_chip *chip) list_del(&chip->list); spin_unlock_irqrestore(&gpio_lock, flags); - gpiochip_unexport(chip); kfree(chip->desc); chip->desc = NULL; diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index e3a52113a541..550a5eafbd38 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -77,6 +77,7 @@ struct gpio_desc { #define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */ #define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */ #define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ +#define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */ #define ID_SHIFT 16 /* add new flags before this one */ diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index be6246de5091..307a309110e6 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -8,7 +8,6 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \ kfd_process.o kfd_queue.o kfd_mqd_manager.o \ kfd_kernel_queue.o kfd_packet_manager.o \ - kfd_process_queue_manager.o kfd_device_queue_manager.o \ - kfd_interrupt.o + kfd_process_queue_manager.o kfd_device_queue_manager.o obj-$(CONFIG_HSA_AMD) += amdkfd.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 43884ebd4303..633532a2e7ec 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -192,13 +192,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto kfd_topology_add_device_error; } - if (kfd_interrupt_init(kfd)) { - dev_err(kfd_device, - "Error initializing interrupts for device (%x:%x)\n", - kfd->pdev->vendor, kfd->pdev->device); - goto kfd_interrupt_error; - } - if (!device_iommu_pasid_init(kfd)) { dev_err(kfd_device, "Error initializing iommuv2 for device (%x:%x)\n", @@ -237,8 +230,6 @@ dqm_start_error: device_queue_manager_error: amd_iommu_free_device(kfd->pdev); device_iommu_pasid_error: - kfd_interrupt_exit(kfd); -kfd_interrupt_error: kfd_topology_remove_device(kfd); kfd_topology_add_device_error: kfd2kgd->fini_sa_manager(kfd->kgd); @@ -254,7 
+245,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) if (kfd->init_complete) { device_queue_manager_uninit(kfd->dqm); amd_iommu_free_device(kfd->pdev); - kfd_interrupt_exit(kfd); kfd_topology_remove_device(kfd); } @@ -296,13 +286,5 @@ int kgd2kfd_resume(struct kfd_dev *kfd) /* This is called directly from KGD at ISR. */ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) { - if (kfd->init_complete) { - spin_lock(&kfd->interrupt_lock); - - if (kfd->interrupts_active - && enqueue_ih_ring_entry(kfd, ih_ring_entry)) - schedule_work(&kfd->interrupt_work); - - spin_unlock(&kfd->interrupt_lock); - } + /* Process interrupts / schedule work as necessary */ } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 9c8961d22360..30c8fda9622e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -280,7 +280,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, q->queue); retval = mqd->load_mqd(mqd, q->mqd, q->pipe, - q->queue, q->properties.write_ptr); + q->queue, (uint32_t __user *) q->properties.write_ptr); if (retval != 0) { deallocate_hqd(dqm, q); mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c deleted file mode 100644 index 5b999095a1f7..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * KFD Interrupts. - * - * AMD GPUs deliver interrupts by pushing an interrupt description onto the - * interrupt ring and then sending an interrupt. KGD receives the interrupt - * in ISR and sends us a pointer to each new entry on the interrupt ring. - * - * We generally can't process interrupt-signaled events from ISR, so we call - * out to each interrupt client module (currently only the scheduler) to ask if - * each interrupt is interesting. If they return true, then it requires further - * processing so we copy it to an internal interrupt ring and call each - * interrupt client again from a work-queue. - * - * There's no acknowledgment for the interrupts we use. The hardware simply - * queues a new interrupt each time without waiting. 
- * - * The fixed-size internal queue means that it's possible for us to lose - * interrupts because we have no back-pressure to the hardware. - */ - -#include <linux/slab.h> -#include <linux/device.h> -#include "kfd_priv.h" - -#define KFD_INTERRUPT_RING_SIZE 256 - -static void interrupt_wq(struct work_struct *); - -int kfd_interrupt_init(struct kfd_dev *kfd) -{ - void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE, - kfd->device_info->ih_ring_entry_size, - GFP_KERNEL); - if (!interrupt_ring) - return -ENOMEM; - - kfd->interrupt_ring = interrupt_ring; - kfd->interrupt_ring_size = - KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size; - atomic_set(&kfd->interrupt_ring_wptr, 0); - atomic_set(&kfd->interrupt_ring_rptr, 0); - - spin_lock_init(&kfd->interrupt_lock); - - INIT_WORK(&kfd->interrupt_work, interrupt_wq); - - kfd->interrupts_active = true; - - /* - * After this function returns, the interrupt will be enabled. This - * barrier ensures that the interrupt running on a different processor - * sees all the above writes. - */ - smp_wmb(); - - return 0; -} - -void kfd_interrupt_exit(struct kfd_dev *kfd) -{ - /* - * Stop the interrupt handler from writing to the ring and scheduling - * workqueue items. The spinlock ensures that any interrupt running - * after we have unlocked sees interrupts_active = false. - */ - unsigned long flags; - - spin_lock_irqsave(&kfd->interrupt_lock, flags); - kfd->interrupts_active = false; - spin_unlock_irqrestore(&kfd->interrupt_lock, flags); - - /* - * Flush_scheduled_work ensures that there are no outstanding - * work-queue items that will access interrupt_ring. New work items - * can't be created because we stopped interrupt handling above. - */ - flush_scheduled_work(); - - kfree(kfd->interrupt_ring); -} - -/* - * This assumes that it can't be called concurrently with itself - * but only with dequeue_ih_ring_entry. - */ -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) -{ - unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr); - unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr); - - if ((rptr - wptr) % kfd->interrupt_ring_size == - kfd->device_info->ih_ring_entry_size) { - /* This is very bad, the system is likely to hang. */ - dev_err_ratelimited(kfd_chardev(), - "Interrupt ring overflow, dropping interrupt.\n"); - return false; - } - - memcpy(kfd->interrupt_ring + wptr, ih_ring_entry, - kfd->device_info->ih_ring_entry_size); - - wptr = (wptr + kfd->device_info->ih_ring_entry_size) % - kfd->interrupt_ring_size; - smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */ - atomic_set(&kfd->interrupt_ring_wptr, wptr); - - return true; -} - -/* - * This assumes that it can't be called concurrently with itself - * but only with enqueue_ih_ring_entry. - */ -static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry) -{ - /* - * Assume that wait queues have an implicit barrier, i.e. anything that - * happened in the ISR before it queued work is visible. - */ - - unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr); - unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr); - - if (rptr == wptr) - return false; - - memcpy(ih_ring_entry, kfd->interrupt_ring + rptr, - kfd->device_info->ih_ring_entry_size); - - rptr = (rptr + kfd->device_info->ih_ring_entry_size) % - kfd->interrupt_ring_size; - - /* - * Ensure the rptr write update is not visible until - * memcpy has finished reading. 
- */ - smp_mb(); - atomic_set(&kfd->interrupt_ring_rptr, rptr); - - return true; -} - -static void interrupt_wq(struct work_struct *work) -{ - struct kfd_dev *dev = container_of(work, struct kfd_dev, - interrupt_work); - - uint32_t ih_ring_entry[DIV_ROUND_UP( - dev->device_info->ih_ring_entry_size, - sizeof(uint32_t))]; - - while (dequeue_ih_ring_entry(dev, ih_ring_entry)) - ; -} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index a5edb29507e3..b3dc13c83169 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -135,22 +135,10 @@ struct kfd_dev { struct kgd2kfd_shared_resources shared_resources; - void *interrupt_ring; - size_t interrupt_ring_size; - atomic_t interrupt_ring_rptr; - atomic_t interrupt_ring_wptr; - struct work_struct interrupt_work; - spinlock_t interrupt_lock; - /* QCM Device instance */ struct device_queue_manager *dqm; bool init_complete; - /* - * Interrupts of interest to KFD are copied - * from the HW ring into a SW ring. - */ - bool interrupts_active; }; /* KGD2KFD callbacks */ @@ -531,10 +519,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx); /* Interrupts */ -int kfd_interrupt_init(struct kfd_dev *dev); -void kfd_interrupt_exit(struct kfd_dev *dev); void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry); /* Power Management */ void kgd2kfd_suspend(struct kfd_dev *kfd); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 52ce26d6b4fb..cf775a4449c1 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -741,7 +741,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) int i, j, rc = 0; int start; - drm_modeset_lock_all(dev); + if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { + return -EBUSY; + } if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); return -EBUSY; @@ -915,7 +917,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, int ret = 0; int i; - drm_modeset_lock_all(dev); + if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { + return -EBUSY; + } if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); return -EBUSY; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 121470a83d1a..1bcbe07cecfc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -645,18 +645,6 @@ static int exynos_drm_init(void) if (!is_exynos) return -ENODEV; - /* - * Register device object only in case of Exynos SoC. - * - * Below codes resolves temporarily infinite loop issue incurred - * by Exynos drm driver when using multi-platform kernel. - * So these codes will be replaced with more generic way later. 
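[Editor's note] The kfd_interrupt.c file deleted above implemented a single-producer/single-consumer ring: the ISR publishes an entry with smp_wmb() before advancing wptr, and the work-queue consumer separates its copy from the rptr update with smp_mb(). A condensed sketch of the producer half, simplified from the code being removed:

/* SPSC enqueue: payload must be globally visible before the index. */
static bool demo_spsc_enqueue(u8 *ring, atomic_t *wptr, atomic_t *rptr,
			      unsigned int ring_size, const u8 *entry,
			      unsigned int entry_size)
{
	unsigned int r = atomic_read(rptr);
	unsigned int w = atomic_read(wptr);

	if ((r - w) % ring_size == entry_size)
		return false;			/* full: drop the entry */

	memcpy(ring + w, entry, entry_size);
	smp_wmb();	/* order the memcpy before the wptr update */
	atomic_set(wptr, (w + entry_size) % ring_size);
	return true;
}
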
- */ - if (!of_machine_is_compatible("samsung,exynos3") && - !of_machine_is_compatible("samsung,exynos4") && - !of_machine_is_compatible("samsung,exynos5")) - return -ENODEV; - exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, NULL, 0); if (IS_ERR(exynos_drm_pdev)) diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 5765a161abdd..98051e8e855a 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1669,7 +1669,6 @@ static void hdmi_mode_apply(struct hdmi_context *hdata) static void hdmiphy_conf_reset(struct hdmi_context *hdata) { - u8 buffer[2]; u32 reg; clk_disable_unprepare(hdata->res.sclk_hdmi); @@ -1677,11 +1676,8 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata) clk_prepare_enable(hdata->res.sclk_hdmi); /* operation mode */ - buffer[0] = 0x1f; - buffer[1] = 0x00; - - if (hdata->hdmiphy_port) - i2c_master_send(hdata->hdmiphy_port, buffer, 2); + hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, + HDMI_PHY_ENABLE_MODE_SET); if (hdata->type == HDMI_TYPE13) reg = HDMI_V13_PHY_RSTOUT; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 820b76234ef4..064ed6597def 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1026,6 +1026,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos) static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr) { struct mixer_context *mixer_ctx = mgr_to_mixer(mgr); + int err; mutex_lock(&mixer_ctx->mixer_mutex); if (!mixer_ctx->powered) { @@ -1034,7 +1035,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr) } mutex_unlock(&mixer_ctx->mixer_mutex); - drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe); + err = drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe); + if (err < 0) { + DRM_DEBUG_KMS("failed to acquire vblank counter\n"); + return; + } atomic_set(&mixer_ctx->wait_vsync_event, 1); @@ -1262,8 +1267,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data) return ret; } - pm_runtime_enable(dev); - return 0; } @@ -1272,8 +1275,6 @@ static void mixer_unbind(struct device *dev, struct device *master, void *data) struct mixer_context *ctx = dev_get_drvdata(dev); mixer_mgr_remove(&ctx->manager); - - pm_runtime_disable(dev); } static const struct component_ops mixer_component_ops = { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c11603b4cf1d..76354d3ba925 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -5155,7 +5155,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) if (!mutex_is_locked(mutex)) return false; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) +#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d0d3dfbe6d2a..b051a238baf9 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -292,6 +292,23 @@ void gen6_enable_rps_interrupts(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); } +u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) +{ + /* + * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. + * + * TODO: verify if this can be reproduced on VLV,CHV. 
+ */ + if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) + mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; + + if (INTEL_INFO(dev_priv)->gen >= 8) + mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; + + return mask; +} + void gen6_disable_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -304,8 +321,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); - I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ? - ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0); + I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e2af1383b179..e7a16f119a29 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9815,7 +9815,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (obj->tiling_mode != work->old_fb_obj->tiling_mode) /* vlv: DISPLAY_FLIP fails to change tiling */ ring = NULL; - } else if (IS_IVYBRIDGE(dev)) { + } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { ring = &dev_priv->ring[BCS]; } else if (INTEL_INFO(dev)->gen >= 7) { ring = obj->ring; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 25fdbb16d4e0..3b40a17b8852 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -794,6 +794,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_reset_rps_interrupts(struct drm_device *dev); void gen6_enable_rps_interrupts(struct drm_device *dev); void gen6_disable_rps_interrupts(struct drm_device *dev); +u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 964b28e3c630..bf814a64582a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4363,16 +4363,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED); mask &= dev_priv->pm_rps_events; - /* IVB and SNB hard hangs on looping batchbuffer - * if GEN6_PM_UP_EI_EXPIRED is masked. 
- */ - if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev)) - mask |= GEN6_PM_RP_UP_EI_EXPIRED; - - if (IS_GEN8(dev_priv->dev)) - mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP; - - return ~mask; + return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); } /* gen6_set_rps is called to update the frequency request, but should also be @@ -4441,7 +4432,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) return; /* Mask turbo interrupt so that they will not come in between */ - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMINTRMSK, + gen6_sanitize_rps_pm_mask(dev_priv, ~0)); vlv_force_gfx_clock(dev_priv, true); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 6dcde3798b45..64fdae558d36 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -6033,6 +6033,17 @@ void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, 0); radeon_ring_write(ring, 1 << vm_id); + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ + WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ + /* compute doesn't have PFP */ if (usepfp) { /* sync PFP to ME, otherwise we might get invalid PFP reads */ diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index dde5c7e29eb2..a0133c74f4cf 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -903,6 +903,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib) void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, unsigned vm_id, uint64_t pd_addr) { + u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) | + SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */ + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); if (vm_id < 8) { radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2); @@ -943,5 +946,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); radeon_ring_write(ring, 1 << vm_id); + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* reference */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ } diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 360de9f1f491..aea48c89b241 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2516,6 +2516,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); radeon_ring_write(ring, 1 << vm_id); + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + 
radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ + /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 50f88611ff60..4be2bb7cbef3 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c @@ -463,5 +463,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); radeon_ring_write(ring, 1 << vm_id); + + /* wait for invalidate to complete */ + radeon_ring_write(ring, DMA_SRBM_READ_PACKET); + radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2)); + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0); /* value */ } diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index 2e12e4d69253..ad7125486894 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -1133,6 +1133,23 @@ #define PACKET3_MEM_SEMAPHORE 0x39 #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_WAIT_REG_MEM 0x3C +#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) + /* 0 - always + * 1 - < + * 2 - <= + * 3 - == + * 4 - != + * 5 - >= + * 6 - > + */ +#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) + /* 0 - reg + * 1 - mem + */ +#define WAIT_REG_MEM_ENGINE(x) ((x) << 8) + /* 0 - me + * 1 - pfp + */ #define PACKET3_MEM_WRITE 0x3D #define PACKET3_PFP_SYNC_ME 0x42 #define PACKET3_SURFACE_SYNC 0x43 @@ -1272,6 +1289,13 @@ (1 << 21) | \ (((n) & 0xFFFFF) << 0)) +#define DMA_SRBM_POLL_PACKET ((9 << 28) | \ + (1 << 27) | \ + (1 << 26)) + +#define DMA_SRBM_READ_PACKET ((9 << 28) | \ + (1 << 27)) + /* async DMA Packet types */ #define DMA_PACKET_WRITE 0x2 #define DMA_PACKET_COPY 0x3 diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 850de57069be..121aff6a3b41 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -333,6 +333,20 @@ static struct radeon_asic_ring r300_gfx_ring = { .set_wptr = &r100_gfx_set_wptr, }; +static struct radeon_asic_ring rv515_gfx_ring = { + .ib_execute = &r100_ring_ib_execute, + .emit_fence = &r300_fence_ring_emit, + .emit_semaphore = &r100_semaphore_ring_emit, + .cs_parse = &r300_cs_parse, + .ring_start = &rv515_ring_start, + .ring_test = &r100_ring_test, + .ib_test = &r100_ib_test, + .is_lockup = &r100_gpu_is_lockup, + .get_rptr = &r100_gfx_get_rptr, + .get_wptr = &r100_gfx_get_wptr, + .set_wptr = &r100_gfx_set_wptr, +}; + static struct radeon_asic r300_asic = { .init = &r300_init, .fini = &r300_fini, @@ -748,7 +762,7 @@ static struct radeon_asic rv515_asic = { .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring + [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring }, .irq = { .set = &rs600_irq_set, @@ -814,7 +828,7 @@ static struct radeon_asic r520_asic = { .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring + [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring }, .irq = { .set = &rs600_irq_set, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index a46f73737994..d0b4f7d1140d 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -576,7 +576,7 @@ error_unreserve: error_free: drm_free_large(vm_bos); - if 
(r) + if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 32522cc940a1..f7da8fe96a66 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1287,8 +1287,39 @@ dpm_failed: return ret; } +struct radeon_dpm_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; +}; + +/* cards with dpm stability problems */ +static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = { + /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */ + { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 }, + /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */ + { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 }, + { 0, 0, 0, 0 }, +}; + int radeon_pm_init(struct radeon_device *rdev) { + struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; + bool disable_dpm = false; + + /* Apply dpm quirks */ + while (p && p->chip_device != 0) { + if (rdev->pdev->vendor == p->chip_vendor && + rdev->pdev->device == p->chip_device && + rdev->pdev->subsystem_vendor == p->subsys_vendor && + rdev->pdev->subsystem_device == p->subsys_device) { + disable_dpm = true; + break; + } + ++p; + } + /* enable dpm on rv6xx+ */ switch (rdev->family) { case CHIP_RV610: @@ -1344,6 +1375,8 @@ int radeon_pm_init(struct radeon_device *rdev) (!(rdev->flags & RADEON_IS_IGP)) && (!rdev->smc_fw)) rdev->pm.pm_method = PM_METHOD_PROFILE; + else if (disable_dpm && (radeon_dpm == -1)) + rdev->pm.pm_method = PM_METHOD_PROFILE; else if (radeon_dpm == 0) rdev->pm.pm_method = PM_METHOD_PROFILE; else diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 60df444bd075..5d89b874a1a2 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5057,6 +5057,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, 0); radeon_ring_write(ring, 1 << vm_id); + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ + /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index f5cc777e1c5f..aa7b872b2c43 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -206,6 +206,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); radeon_ring_write(ring, 1 << vm_id); + + /* wait for invalidate to complete */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); + radeon_ring_write(ring, VM_INVALIDATE_REQUEST); + radeon_ring_write(ring, 0xff << 16); /* retry */ + radeon_ring_write(ring, 1 << vm_id); /* mask */ + radeon_ring_write(ring, 0); /* value */ + radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ } /** diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 32e354b8b0ab..eff8a6444956 100644 --- 
a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2908,6 +2908,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev) return ret; } +struct si_dpm_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; + u32 max_sclk; + u32 max_mclk; +}; + +/* cards with dpm stability problems */ +static struct si_dpm_quirk si_dpm_quirk_list[] = { + /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ + { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, + { 0, 0, 0, 0 }, +}; + static void si_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_ps *rps) { @@ -2918,7 +2934,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, u32 mclk, sclk; u16 vddc, vddci; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; + u32 max_sclk = 0, max_mclk = 0; int i; + struct si_dpm_quirk *p = si_dpm_quirk_list; + + /* Apply dpm quirks */ + while (p && p->chip_device != 0) { + if (rdev->pdev->vendor == p->chip_vendor && + rdev->pdev->device == p->chip_device && + rdev->pdev->subsystem_vendor == p->subsys_vendor && + rdev->pdev->subsystem_device == p->subsys_device) { + max_sclk = p->max_sclk; + max_mclk = p->max_mclk; + break; + } + ++p; + } if ((rdev->pm.dpm.new_active_crtc_count > 1) || ni_dpm_vblank_too_short(rdev)) @@ -2972,6 +3003,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, if (ps->performance_levels[i].mclk > max_mclk_vddc) ps->performance_levels[i].mclk = max_mclk_vddc; } + if (max_mclk) { + if (ps->performance_levels[i].mclk > max_mclk) + ps->performance_levels[i].mclk = max_mclk; + } + if (max_sclk) { + if (ps->performance_levels[i].sclk > max_sclk) + ps->performance_levels[i].sclk = max_sclk; + } } /* XXX validate the min clocks required for display */ diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 4069be89e585..84999242c747 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -1632,6 +1632,23 @@ #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_COPY_DW 0x3B #define PACKET3_WAIT_REG_MEM 0x3C +#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) + /* 0 - always + * 1 - < + * 2 - <= + * 3 - == + * 4 - != + * 5 - >= + * 6 - > + */ +#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) + /* 0 - reg + * 1 - mem + */ +#define WAIT_REG_MEM_ENGINE(x) ((x) << 8) + /* 0 - me + * 1 - pfp + */ #define PACKET3_MEM_WRITE 0x3D #define PACKET3_COPY_DATA 0x40 #define PACKET3_CP_DMA 0x41 @@ -1835,6 +1852,7 @@ #define DMA_PACKET_TRAP 0x7 #define DMA_PACKET_SRBM_WRITE 0x9 #define DMA_PACKET_CONSTANT_FILL 0xd +#define DMA_PACKET_POLL_REG_MEM 0xe #define DMA_PACKET_NOP 0xf #define VCE_STATUS 0x20004 diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 6529c09c46f0..a7de26d1ac80 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON for those channels specified in the map. This map can be provided either via platform data or the device tree bindings. +config SENSORS_I5500 + tristate "Intel 5500/5520/X58 temperature sensor" + depends on X86 && PCI + help + If you say yes here you get support for the temperature + sensor inside the Intel 5500, 5520 and X58 chipsets. + + This driver can also be built as a module. If so, the module + will be called i5500_temp. 
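[Editor's note] radeon_pm.c and si_dpm.c above introduce the same shape of table twice: a sentinel-terminated quirk list keyed on PCI vendor/device/subsystem IDs, walked once at init to flag known-bad boards. A generic sketch of the walk; the struct, IDs and payload are illustrative:

#include <linux/pci.h>

struct demo_quirk {
	u32 chip_vendor, chip_device;
	u32 subsys_vendor, subsys_device;
	u32 max_mclk;				/* quirk payload */
};

static const struct demo_quirk demo_quirk_list[] = {
	{ 0x1002, 0x6810, 0x1462, 0x3036, 120000 },
	{ }					/* all-zero sentinel */
};

static u32 demo_lookup_quirk(const struct pci_dev *pdev)
{
	const struct demo_quirk *p;

	for (p = demo_quirk_list; p->chip_device; p++)
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device)
			return p->max_mclk;
	return 0;				/* no quirk: no clamp */
}
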
+ config SENSORS_CORETEMP tristate "Intel Core/Core2/Atom temperature sensor" depends on X86 diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 67280643bcf0..6c941472e707 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o obj-$(CONFIG_SENSORS_HTU21) += htu21.o obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o +obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c new file mode 100644 index 000000000000..3e3ccbf18b4e --- /dev/null +++ b/drivers/hwmon/i5500_temp.c @@ -0,0 +1,149 @@ +/* + * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor + * + * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/jiffies.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/mutex.h> + +/* Register definitions from datasheet */ +#define REG_TSTHRCATA 0xE2 +#define REG_TSCTRL 0xE8 +#define REG_TSTHRRPEX 0xEB +#define REG_TSTHRLO 0xEC +#define REG_TSTHRHI 0xEE +#define REG_CTHINT 0xF0 +#define REG_TSFSC 0xF3 +#define REG_CTSTS 0xF4 +#define REG_TSTHRRQPI 0xF5 +#define REG_CTCTRL 0xF7 +#define REG_TSTIMER 0xF8 + +/* + * Sysfs stuff + */ + +/* Sensor resolution : 0.5 degree C */ +static ssize_t show_temp(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev->parent); + long temp; + u16 tsthrhi; + s8 tsfsc; + + pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi); + pci_read_config_byte(pdev, REG_TSFSC, &tsfsc); + temp = ((long)tsthrhi - tsfsc) * 500; + + return sprintf(buf, "%ld\n", temp); +} + +static ssize_t show_thresh(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev->parent); + int reg = to_sensor_dev_attr(devattr)->index; + long temp; + u16 tsthr; + + pci_read_config_word(pdev, reg, &tsthr); + temp = tsthr * 500; + + return sprintf(buf, "%ld\n", temp); +} + +static ssize_t show_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev->parent); + int nr = to_sensor_dev_attr(devattr)->index; + u8 ctsts; + + pci_read_config_byte(pdev, REG_CTSTS, &ctsts); + return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr)); +} + +static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL); +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2); +static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC); +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE); +static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0); +static 
SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1); + +static struct attribute *i5500_temp_attrs[] = { + &dev_attr_temp1_input.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(i5500_temp); + +static const struct pci_device_id i5500_temp_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) }, + { 0 }, +}; + +MODULE_DEVICE_TABLE(pci, i5500_temp_ids); + +static int i5500_temp_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int err; + struct device *hwmon_dev; + u32 tstimer; + s8 tsfsc; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable device\n"); + return err; + } + + pci_read_config_byte(pdev, REG_TSFSC, &tsfsc); + pci_read_config_dword(pdev, REG_TSTIMER, &tstimer); + if (tsfsc == 0x7F && tstimer == 0x07D30D40) { + dev_notice(&pdev->dev, "Sensor seems to be disabled\n"); + return -ENODEV; + } + + hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, + "intel5500", NULL, + i5500_temp_groups); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static struct pci_driver i5500_temp_driver = { + .name = "i5500_temp", + .id_table = i5500_temp_ids, + .probe = i5500_temp_probe, +}; + +module_pci_driver(i5500_temp_driver); + +MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); +MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 57ecc5b204f3..9117b7a2d5f8 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1114,7 +1114,8 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_ struct mlx4_dev *dev = to_mdev(qp->device)->dev; int err = 0; - if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || + dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) return 0; /* do nothing */ ib_flow = flow_attr + 1; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index f2b978026407..77ecf6d32237 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1520,6 +1520,8 @@ static int elantech_set_properties(struct elantech_data *etd) case 7: case 8: case 9: + case 10: + case 13: etd->hw_version = 4; break; default: diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index c66d1b53843e..764857b4e268 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -415,6 +415,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { }, }, { + /* Acer Aspire 7738 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"), + }, + }, + { /* Gericom Bellagio */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), @@ -745,6 +752,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = { { } }; +/* + * Some laptops need keyboard reset before probing for the trackpad to get + * it detected, initialised & finally work. 
+ */ +static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { + { + /* Gigabyte P35 v2 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"), + }, + }, + { + /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "X3"), + }, + }, + { + /* Gigabyte P34 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P34"), + }, + }, + { } +}; + #endif /* CONFIG_X86 */ #ifdef CONFIG_PNP @@ -1040,6 +1076,9 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_dritek_table)) i8042_dritek = true; + if (dmi_check_system(i8042_dmi_kbdreset_table)) + i8042_kbdreset = true; + /* * A20 was already enabled during early kernel init. But some buggy * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 924e4bf357fb..986a71c614b0 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -67,6 +67,10 @@ static bool i8042_notimeout; module_param_named(notimeout, i8042_notimeout, bool, 0); MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042"); +static bool i8042_kbdreset; +module_param_named(kbdreset, i8042_kbdreset, bool, 0); +MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port"); + #ifdef CONFIG_X86 static bool i8042_dritek; module_param_named(dritek, i8042_dritek, bool, 0); @@ -790,6 +794,16 @@ static int __init i8042_check_aux(void) return -1; /* + * Reset keyboard (needed on some laptops to successfully detect + * touchpad, e.g., some Gigabyte laptop models with Elantech + * touchpads). + */ + if (i8042_kbdreset) { + pr_warn("Attempting to reset device connected to KBD port\n"); + i8042_kbd_write(NULL, (unsigned char) 0xff); + } + +/* * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and * used it for a PCI card or somethig else. */ diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 325188eef1c1..87060ad6829d 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -13,6 +13,32 @@ menuconfig IOMMU_SUPPORT if IOMMU_SUPPORT +menu "Generic IOMMU Pagetable Support" + +# Selected by the actual pagetable implementations +config IOMMU_IO_PGTABLE + bool + +config IOMMU_IO_PGTABLE_LPAE + bool "ARMv7/v8 Long Descriptor Format" + select IOMMU_IO_PGTABLE + help + Enable support for the ARM long descriptor pagetable format. + This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page + sizes at both stage-1 and stage-2, as well as address spaces + up to 48-bits in size. + +config IOMMU_IO_PGTABLE_LPAE_SELFTEST + bool "LPAE selftests" + depends on IOMMU_IO_PGTABLE_LPAE + help + Enable self-tests for LPAE page table allocator. This performs + a series of page-table consistency checks during boot. + + If unsure, say N here. + +endmenu + config OF_IOMMU def_bool y depends on OF && IOMMU_API @@ -304,13 +330,13 @@ config SPAPR_TCE_IOMMU config ARM_SMMU bool "ARM Ltd. System MMU (SMMU) Support" - depends on ARM64 || (ARM_LPAE && OF) + depends on ARM64 || ARM select IOMMU_API + select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU if ARM help Support for implementations of the ARM System MMU architecture - versions 1 and 2. The driver supports both v7l and v8l table - formats with 4k and 64k page sizes. + versions 1 and 2. Say Y here if your SoC includes an IOMMU device implementing the ARM SMMU architecture. 
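The i8042_dmi_kbdreset_table above only automates setting the new i8042.kbdreset parameter. On an affected laptop that is not yet in the DMI table, the same keyboard reset can be requested by hand, following the usual module-parameter conventions:

	i8042.kbdreset=1		# kernel command line (i8042 built in)
	modprobe i8042 kbdreset=1	# i8042 built as a module
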
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 7b976f294a69..d6889b487d55 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,6 +1,8 @@ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o +obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o +obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 6cd47b75286f..1d6d43bb3395 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -23,8 +23,6 @@ * - Stream-matching and stream-indexing * - v7/v8 long-descriptor format * - Non-secure access to the SMMU - * - 4k and 64k pages, with contiguous pte hints. - * - Up to 48-bit addressing (dependent on VA_BITS) * - Context fault reporting */ @@ -36,7 +34,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iommu.h> -#include <linux/mm.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pci.h> @@ -46,7 +44,7 @@ #include <linux/amba/bus.h> -#include <asm/pgalloc.h> +#include "io-pgtable.h" /* Maximum number of stream IDs assigned to a single device */ #define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS @@ -71,40 +69,6 @@ ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ ? 0x400 : 0)) -/* Page table bits */ -#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) -#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) -#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) -#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) -#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) -#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) -#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) - -#if PAGE_SIZE == SZ_4K -#define ARM_SMMU_PTE_CONT_ENTRIES 16 -#elif PAGE_SIZE == SZ_64K -#define ARM_SMMU_PTE_CONT_ENTRIES 32 -#else -#define ARM_SMMU_PTE_CONT_ENTRIES 1 -#endif - -#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) -#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) - -/* Stage-1 PTE */ -#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) -#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) -#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 -#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11) - -/* Stage-2 PTE */ -#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) -#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6) -#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6) -#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) -#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) -#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) - /* Configuration registers */ #define ARM_SMMU_GR0_sCR0 0x0 #define sCR0_CLIENTPD (1 << 0) @@ -132,17 +96,12 @@ #define ARM_SMMU_GR0_sGFSYNR0 0x50 #define ARM_SMMU_GR0_sGFSYNR1 0x54 #define ARM_SMMU_GR0_sGFSYNR2 0x58 -#define ARM_SMMU_GR0_PIDR0 0xfe0 -#define ARM_SMMU_GR0_PIDR1 0xfe4 -#define ARM_SMMU_GR0_PIDR2 0xfe8 #define ID0_S1TS (1 << 30) #define ID0_S2TS (1 << 29) #define ID0_NTS (1 << 28) #define ID0_SMS (1 << 27) -#define ID0_PTFS_SHIFT 24 -#define ID0_PTFS_MASK 0x2 -#define ID0_PTFS_V8_ONLY 0x2 +#define ID0_ATOSNS (1 << 26) #define ID0_CTTW (1 << 14) #define ID0_NUMIRPT_SHIFT 16 #define ID0_NUMIRPT_MASK 0xff @@ -169,11 +128,7 @@ #define ID2_PTFS_16K (1 << 13) #define ID2_PTFS_64K (1 << 14) -#define PIDR2_ARCH_SHIFT 4 -#define PIDR2_ARCH_MASK 0xf - /* Global TLB invalidation */ -#define ARM_SMMU_GR0_STLBIALL 
0x60 #define ARM_SMMU_GR0_TLBIVMID 0x64 #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 #define ARM_SMMU_GR0_TLBIALLH 0x6c @@ -231,13 +186,25 @@ #define ARM_SMMU_CB_TTBCR2 0x10 #define ARM_SMMU_CB_TTBR0_LO 0x20 #define ARM_SMMU_CB_TTBR0_HI 0x24 +#define ARM_SMMU_CB_TTBR1_LO 0x28 +#define ARM_SMMU_CB_TTBR1_HI 0x2c #define ARM_SMMU_CB_TTBCR 0x30 #define ARM_SMMU_CB_S1_MAIR0 0x38 +#define ARM_SMMU_CB_S1_MAIR1 0x3c +#define ARM_SMMU_CB_PAR_LO 0x50 +#define ARM_SMMU_CB_PAR_HI 0x54 #define ARM_SMMU_CB_FSR 0x58 #define ARM_SMMU_CB_FAR_LO 0x60 #define ARM_SMMU_CB_FAR_HI 0x64 #define ARM_SMMU_CB_FSYNR0 0x68 +#define ARM_SMMU_CB_S1_TLBIVA 0x600 #define ARM_SMMU_CB_S1_TLBIASID 0x610 +#define ARM_SMMU_CB_S1_TLBIVAL 0x620 +#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 +#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 +#define ARM_SMMU_CB_ATS1PR_LO 0x800 +#define ARM_SMMU_CB_ATS1PR_HI 0x804 +#define ARM_SMMU_CB_ATSR 0x8f0 #define SCTLR_S1_ASIDPNE (1 << 12) #define SCTLR_CFCFG (1 << 7) @@ -249,47 +216,16 @@ #define SCTLR_M (1 << 0) #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) -#define RESUME_RETRY (0 << 0) -#define RESUME_TERMINATE (1 << 0) - -#define TTBCR_EAE (1 << 31) +#define CB_PAR_F (1 << 0) -#define TTBCR_PASIZE_SHIFT 16 -#define TTBCR_PASIZE_MASK 0x7 +#define ATSR_ACTIVE (1 << 0) -#define TTBCR_TG0_4K (0 << 14) -#define TTBCR_TG0_64K (1 << 14) - -#define TTBCR_SH0_SHIFT 12 -#define TTBCR_SH0_MASK 0x3 -#define TTBCR_SH_NS 0 -#define TTBCR_SH_OS 2 -#define TTBCR_SH_IS 3 - -#define TTBCR_ORGN0_SHIFT 10 -#define TTBCR_IRGN0_SHIFT 8 -#define TTBCR_RGN_MASK 0x3 -#define TTBCR_RGN_NC 0 -#define TTBCR_RGN_WBWA 1 -#define TTBCR_RGN_WT 2 -#define TTBCR_RGN_WB 3 - -#define TTBCR_SL0_SHIFT 6 -#define TTBCR_SL0_MASK 0x3 -#define TTBCR_SL0_LVL_2 0 -#define TTBCR_SL0_LVL_1 1 - -#define TTBCR_T1SZ_SHIFT 16 -#define TTBCR_T0SZ_SHIFT 0 -#define TTBCR_SZ_MASK 0xf +#define RESUME_RETRY (0 << 0) +#define RESUME_TERMINATE (1 << 0) #define TTBCR2_SEP_SHIFT 15 #define TTBCR2_SEP_MASK 0x7 -#define TTBCR2_PASIZE_SHIFT 0 -#define TTBCR2_PASIZE_MASK 0x7 - -/* Common definitions for PASize and SEP fields */ #define TTBCR2_ADDR_32 0 #define TTBCR2_ADDR_36 1 #define TTBCR2_ADDR_40 2 @@ -297,16 +233,7 @@ #define TTBCR2_ADDR_44 4 #define TTBCR2_ADDR_48 5 -#define TTBRn_HI_ASID_SHIFT 16 - -#define MAIR_ATTR_SHIFT(n) ((n) << 3) -#define MAIR_ATTR_MASK 0xff -#define MAIR_ATTR_DEVICE 0x04 -#define MAIR_ATTR_NC 0x44 -#define MAIR_ATTR_WBRWA 0xff -#define MAIR_ATTR_IDX_NC 0 -#define MAIR_ATTR_IDX_CACHE 1 -#define MAIR_ATTR_IDX_DEV 2 +#define TTBRn_HI_ASID_SHIFT 16 #define FSR_MULTI (1 << 31) #define FSR_SS (1 << 30) @@ -366,6 +293,7 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) +#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) u32 features; #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) @@ -380,10 +308,9 @@ struct arm_smmu_device { u32 num_mapping_groups; DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); - unsigned long s1_input_size; - unsigned long s1_output_size; - unsigned long s2_input_size; - unsigned long s2_output_size; + unsigned long va_size; + unsigned long ipa_size; + unsigned long pa_size; u32 num_global_irqs; u32 num_context_irqs; @@ -397,7 +324,6 @@ struct arm_smmu_cfg { u8 cbndx; u8 irptndx; u32 cbar; - pgd_t *pgd; }; #define INVALID_IRPTNDX 0xff @@ -412,11 +338,15 @@ enum arm_smmu_domain_stage { struct arm_smmu_domain { struct arm_smmu_device *smmu; + struct io_pgtable_ops *pgtbl_ops; + spinlock_t pgtbl_lock; struct arm_smmu_cfg cfg; enum 
arm_smmu_domain_stage stage; - spinlock_t lock; + struct mutex init_mutex; /* Protects smmu pointer */ }; +static struct iommu_ops arm_smmu_ops; + static DEFINE_SPINLOCK(arm_smmu_devices_lock); static LIST_HEAD(arm_smmu_devices); @@ -597,7 +527,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx) } /* Wait for any pending TLB invalidations to complete */ -static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) +static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) { int count = 0; void __iomem *gr0_base = ARM_SMMU_GR0(smmu); @@ -615,12 +545,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) } } -static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) +static void arm_smmu_tlb_sync(void *cookie) { + struct arm_smmu_domain *smmu_domain = cookie; + __arm_smmu_tlb_sync(smmu_domain->smmu); +} + +static void arm_smmu_tlb_inv_context(void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *base = ARM_SMMU_GR0(smmu); bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; + void __iomem *base; if (stage1) { base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); @@ -632,9 +569,76 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) base + ARM_SMMU_GR0_TLBIVMID); } - arm_smmu_tlb_sync(smmu); + __arm_smmu_tlb_sync(smmu); +} + +static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, + bool leaf, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_device *smmu = smmu_domain->smmu; + bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; + void __iomem *reg; + + if (stage1) { + reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; + + if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) { + iova &= ~12UL; + iova |= ARM_SMMU_CB_ASID(cfg); + writel_relaxed(iova, reg); +#ifdef CONFIG_64BIT + } else { + iova >>= 12; + iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48; + writeq_relaxed(iova, reg); +#endif + } +#ifdef CONFIG_64BIT + } else if (smmu->version == ARM_SMMU_V2) { + reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : + ARM_SMMU_CB_S2_TLBIIPAS2; + writeq_relaxed(iova >> 12, reg); +#endif + } else { + reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; + writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg); + } +} + +static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; + unsigned long offset = (unsigned long)addr & ~PAGE_MASK; + + + /* Ensure new page tables are visible to the hardware walker */ + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { + dsb(ishst); + } else { + /* + * If the SMMU can't walk tables in the CPU caches, treat them + * like non-coherent DMA since we need to flush the new entries + * all the way out to memory. There's no possibility of + * recursion here as the SMMU table walker will not be wired + * through another SMMU. 
+ */ + dma_map_page(smmu->dev, virt_to_page(addr), offset, size, + DMA_TO_DEVICE); + } } +static struct iommu_gather_ops arm_smmu_gather_ops = { + .tlb_flush_all = arm_smmu_tlb_inv_context, + .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, + .tlb_sync = arm_smmu_tlb_sync, + .flush_pgtable = arm_smmu_flush_pgtable, +}; + static irqreturn_t arm_smmu_context_fault(int irq, void *dev) { int flags, ret; @@ -712,29 +716,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) return IRQ_HANDLED; } -static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, - size_t size) -{ - unsigned long offset = (unsigned long)addr & ~PAGE_MASK; - - - /* Ensure new page tables are visible to the hardware walker */ - if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { - dsb(ishst); - } else { - /* - * If the SMMU can't walk tables in the CPU caches, treat them - * like non-coherent DMA since we need to flush the new entries - * all the way out to memory. There's no possibility of - * recursion here as the SMMU table walker will not be wired - * through another SMMU. - */ - dma_map_page(smmu->dev, virt_to_page(addr), offset, size, - DMA_TO_DEVICE); - } -} - -static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) +static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, + struct io_pgtable_cfg *pgtbl_cfg) { u32 reg; bool stage1; @@ -771,124 +754,68 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) #else reg = CBA2R_RW64_32BIT; #endif - writel_relaxed(reg, - gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); - - /* TTBCR2 */ - switch (smmu->s1_input_size) { - case 32: - reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); - break; - case 36: - reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); - break; - case 39: - case 40: - reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); - break; - case 42: - reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); - break; - case 44: - reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); - break; - case 48: - reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); - break; - } - - switch (smmu->s1_output_size) { - case 32: - reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT); - break; - case 36: - reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); - break; - case 39: - case 40: - reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); - break; - case 42: - reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT); - break; - case 44: - reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT); - break; - case 48: - reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT); - break; - } - - if (stage1) - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); + writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); } - /* TTBR0 */ - arm_smmu_flush_pgtable(smmu, cfg->pgd, - PTRS_PER_PGD * sizeof(pgd_t)); - reg = __pa(cfg->pgd); - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); - reg = (phys_addr_t)__pa(cfg->pgd) >> 32; - if (stage1) + /* TTBRs */ + if (stage1) { + reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); + reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32; reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); - - /* - * TTBCR - * We use long descriptor, with inner-shareable WBWA tables in TTBR0. 
- */ - if (smmu->version > ARM_SMMU_V1) { - if (PAGE_SIZE == SZ_4K) - reg = TTBCR_TG0_4K; - else - reg = TTBCR_TG0_64K; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); - if (!stage1) { - reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT; + reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO); + reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32; + reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI); + } else { + reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); + reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); + } - switch (smmu->s2_output_size) { + /* TTBCR */ + if (stage1) { + reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); + if (smmu->version > ARM_SMMU_V1) { + reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; + switch (smmu->va_size) { case 32: - reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); + reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); break; case 36: - reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT); + reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); break; case 40: - reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT); + reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); break; case 42: - reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT); + reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); break; case 44: - reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT); + reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); break; case 48: - reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT); + reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); break; } - } else { - reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); } } else { - reg = 0; + reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); } - reg |= TTBCR_EAE | - (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | - (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | - (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); - - if (!stage1) - reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); - - writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); - - /* MAIR0 (stage-1 only) */ + /* MAIRs (stage-1 only) */ if (stage1) { - reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) | - (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) | - (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)); + reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); + reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1); } /* SCTLR */ @@ -905,11 +832,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, struct arm_smmu_device *smmu) { int irq, start, ret = 0; - unsigned long flags; + unsigned long ias, oas; + struct io_pgtable_ops *pgtbl_ops; + struct io_pgtable_cfg pgtbl_cfg; + enum io_pgtable_fmt fmt; struct arm_smmu_domain *smmu_domain = domain->priv; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - spin_lock_irqsave(&smmu_domain->lock, flags); + mutex_lock(&smmu_domain->init_mutex); if (smmu_domain->smmu) goto out_unlock; @@ -940,6 +870,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, case ARM_SMMU_DOMAIN_S1: cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; start = smmu->num_s2_context_banks; + ias = smmu->va_size; + oas = smmu->ipa_size; + if (IS_ENABLED(CONFIG_64BIT)) + fmt = ARM_64_LPAE_S1; + else + fmt = ARM_32_LPAE_S1; break; case ARM_SMMU_DOMAIN_NESTED: /* @@ -949,6 +885,12 @@ static int 
arm_smmu_init_domain_context(struct iommu_domain *domain, case ARM_SMMU_DOMAIN_S2: cfg->cbar = CBAR_TYPE_S2_TRANS; start = 0; + ias = smmu->ipa_size; + oas = smmu->pa_size; + if (IS_ENABLED(CONFIG_64BIT)) + fmt = ARM_64_LPAE_S2; + else + fmt = ARM_32_LPAE_S2; break; default: ret = -EINVAL; @@ -968,10 +910,30 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, cfg->irptndx = cfg->cbndx; } - ACCESS_ONCE(smmu_domain->smmu) = smmu; - arm_smmu_init_context_bank(smmu_domain); - spin_unlock_irqrestore(&smmu_domain->lock, flags); + pgtbl_cfg = (struct io_pgtable_cfg) { + .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap, + .ias = ias, + .oas = oas, + .tlb = &arm_smmu_gather_ops, + }; + + smmu_domain->smmu = smmu; + pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); + if (!pgtbl_ops) { + ret = -ENOMEM; + goto out_clear_smmu; + } + + /* Update our support page sizes to reflect the page table format */ + arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; + /* Initialise the context bank with our page table cfg */ + arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); + + /* + * Request context fault interrupt. Do this last to avoid the + * handler seeing a half-initialised domain state. + */ irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, "arm-smmu-context-fault", domain); @@ -981,10 +943,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, cfg->irptndx = INVALID_IRPTNDX; } + mutex_unlock(&smmu_domain->init_mutex); + + /* Publish page table ops for map/unmap */ + smmu_domain->pgtbl_ops = pgtbl_ops; return 0; +out_clear_smmu: + smmu_domain->smmu = NULL; out_unlock: - spin_unlock_irqrestore(&smmu_domain->lock, flags); + mutex_unlock(&smmu_domain->init_mutex); return ret; } @@ -999,23 +967,27 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) if (!smmu) return; - /* Disable the context bank and nuke the TLB before freeing it. */ + /* + * Disable the context bank and free the page tables before freeing + * it. + */ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); - arm_smmu_tlb_inv_context(smmu_domain); if (cfg->irptndx != INVALID_IRPTNDX) { irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; free_irq(irq, domain); } + if (smmu_domain->pgtbl_ops) + free_io_pgtable_ops(smmu_domain->pgtbl_ops); + __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); } static int arm_smmu_domain_init(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain; - pgd_t *pgd; /* * Allocate the domain and initialise some of its data structures. 
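For orientation, this is the io-pgtable consumer pattern that arm-smmu.c adopts in the hunks above, reduced to a minimal sketch. The my_* names are hypothetical stand-ins; the io_pgtable_cfg fields, the iommu_gather_ops callbacks and alloc_io_pgtable_ops()/free_io_pgtable_ops() are the API introduced by this series:

	#include <linux/errno.h>
	#include <linux/sizes.h>

	#include "io-pgtable.h"

	struct my_domain {			/* hypothetical driver state */
		struct io_pgtable_ops *pgtbl_ops;
	};

	/* TLB maintenance callbacks invoked by the allocator; cookie == domain */
	static void my_tlb_flush_all(void *cookie) { }
	static void my_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				     void *cookie) { }
	static void my_tlb_sync(void *cookie) { }
	static void my_flush_pgtable(void *addr, size_t size, void *cookie) { }

	static const struct iommu_gather_ops my_tlb_ops = {
		.tlb_flush_all	= my_tlb_flush_all,
		.tlb_add_flush	= my_tlb_add_flush,
		.tlb_sync	= my_tlb_sync,
		.flush_pgtable	= my_flush_pgtable,
	};

	static int my_domain_finalise(struct my_domain *dom)
	{
		struct io_pgtable_cfg cfg = {
			.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
			.ias		= 32,	/* input (IOVA) address bits */
			.oas		= 40,	/* output (PA) address bits */
			.tlb		= &my_tlb_ops,
		};

		/* Stage-1 LPAE format; the cookie comes back in the TLB callbacks */
		dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &cfg, dom);
		if (!dom->pgtbl_ops)
			return -ENOMEM;

		/*
		 * Mappings now go through the returned ops, e.g.:
		 *   dom->pgtbl_ops->map(dom->pgtbl_ops, iova, paddr, SZ_4K, prot);
		 * and are torn down with free_io_pgtable_ops(dom->pgtbl_ops).
		 */
		return 0;
	}
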
@@ -1026,81 +998,10 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) if (!smmu_domain) return -ENOMEM; - pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL); - if (!pgd) - goto out_free_domain; - smmu_domain->cfg.pgd = pgd; - - spin_lock_init(&smmu_domain->lock); + mutex_init(&smmu_domain->init_mutex); + spin_lock_init(&smmu_domain->pgtbl_lock); domain->priv = smmu_domain; return 0; - -out_free_domain: - kfree(smmu_domain); - return -ENOMEM; -} - -static void arm_smmu_free_ptes(pmd_t *pmd) -{ - pgtable_t table = pmd_pgtable(*pmd); - - __free_page(table); -} - -static void arm_smmu_free_pmds(pud_t *pud) -{ - int i; - pmd_t *pmd, *pmd_base = pmd_offset(pud, 0); - - pmd = pmd_base; - for (i = 0; i < PTRS_PER_PMD; ++i) { - if (pmd_none(*pmd)) - continue; - - arm_smmu_free_ptes(pmd); - pmd++; - } - - pmd_free(NULL, pmd_base); -} - -static void arm_smmu_free_puds(pgd_t *pgd) -{ - int i; - pud_t *pud, *pud_base = pud_offset(pgd, 0); - - pud = pud_base; - for (i = 0; i < PTRS_PER_PUD; ++i) { - if (pud_none(*pud)) - continue; - - arm_smmu_free_pmds(pud); - pud++; - } - - pud_free(NULL, pud_base); -} - -static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) -{ - int i; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - pgd_t *pgd, *pgd_base = cfg->pgd; - - /* - * Recursively free the page tables for this domain. We don't - * care about speculative TLB filling because the tables should - * not be active in any context bank at this point (SCTLR.M is 0). - */ - pgd = pgd_base; - for (i = 0; i < PTRS_PER_PGD; ++i) { - if (pgd_none(*pgd)) - continue; - arm_smmu_free_puds(pgd); - pgd++; - } - - kfree(pgd_base); } static void arm_smmu_domain_destroy(struct iommu_domain *domain) @@ -1112,7 +1013,6 @@ static void arm_smmu_domain_destroy(struct iommu_domain *domain) * already been detached. */ arm_smmu_destroy_domain_context(domain); - arm_smmu_free_pgtables(smmu_domain); kfree(smmu_domain); } @@ -1244,7 +1144,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret; struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_device *smmu, *dom_smmu; + struct arm_smmu_device *smmu; struct arm_smmu_master_cfg *cfg; smmu = find_smmu_for_device(dev); @@ -1258,21 +1158,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) return -EEXIST; } + /* Ensure that the domain is finalised */ + ret = arm_smmu_init_domain_context(domain, smmu); + if (IS_ERR_VALUE(ret)) + return ret; + /* * Sanity check the domain. We don't support domains across * different SMMUs. 
*/ - dom_smmu = ACCESS_ONCE(smmu_domain->smmu); - if (!dom_smmu) { - /* Now that we have a master, we can finalise the domain */ - ret = arm_smmu_init_domain_context(domain, smmu); - if (IS_ERR_VALUE(ret)) - return ret; - - dom_smmu = smmu_domain->smmu; - } - - if (dom_smmu != smmu) { + if (smmu_domain->smmu != smmu) { dev_err(dev, "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); @@ -1303,293 +1198,103 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) arm_smmu_domain_remove_master(smmu_domain, cfg); } -static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, - unsigned long end) -{ - return !(addr & ~ARM_SMMU_PTE_CONT_MASK) && - (addr + ARM_SMMU_PTE_CONT_SIZE <= end); -} - -static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, - unsigned long addr, unsigned long end, - unsigned long pfn, int prot, int stage) -{ - pte_t *pte, *start; - pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF; - - if (pmd_none(*pmd)) { - /* Allocate a new set of tables */ - pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); - - if (!table) - return -ENOMEM; - - arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); - pmd_populate(NULL, pmd, table); - arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); - } - - if (stage == 1) { - pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG; - if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) - pteval |= ARM_SMMU_PTE_AP_RDONLY; - - if (prot & IOMMU_CACHE) - pteval |= (MAIR_ATTR_IDX_CACHE << - ARM_SMMU_PTE_ATTRINDX_SHIFT); - } else { - pteval |= ARM_SMMU_PTE_HAP_FAULT; - if (prot & IOMMU_READ) - pteval |= ARM_SMMU_PTE_HAP_READ; - if (prot & IOMMU_WRITE) - pteval |= ARM_SMMU_PTE_HAP_WRITE; - if (prot & IOMMU_CACHE) - pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; - else - pteval |= ARM_SMMU_PTE_MEMATTR_NC; - } - - if (prot & IOMMU_NOEXEC) - pteval |= ARM_SMMU_PTE_XN; - - /* If no access, create a faulting entry to avoid TLB fills */ - if (!(prot & (IOMMU_READ | IOMMU_WRITE))) - pteval &= ~ARM_SMMU_PTE_PAGE; - - pteval |= ARM_SMMU_PTE_SH_IS; - start = pmd_page_vaddr(*pmd) + pte_index(addr); - pte = start; - - /* - * Install the page table entries. This is fairly complicated - * since we attempt to make use of the contiguous hint in the - * ptes where possible. The contiguous hint indicates a series - * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically - * contiguous region with the following constraints: - * - * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE - * - Each pte in the region has the contiguous hint bit set - * - * This complicates unmapping (also handled by this code, when - * neither IOMMU_READ or IOMMU_WRITE are set) because it is - * possible, yet highly unlikely, that a client may unmap only - * part of a contiguous range. This requires clearing of the - * contiguous hint bits in the range before installing the new - * faulting entries. - * - * Note that re-mapping an address range without first unmapping - * it is not supported, so TLB invalidation is not required here - * and is instead performed at unmap and domain-init time. 
- */ - do { - int i = 1; - - pteval &= ~ARM_SMMU_PTE_CONT; - - if (arm_smmu_pte_is_contiguous_range(addr, end)) { - i = ARM_SMMU_PTE_CONT_ENTRIES; - pteval |= ARM_SMMU_PTE_CONT; - } else if (pte_val(*pte) & - (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) { - int j; - pte_t *cont_start; - unsigned long idx = pte_index(addr); - - idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); - cont_start = pmd_page_vaddr(*pmd) + idx; - for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) - pte_val(*(cont_start + j)) &= - ~ARM_SMMU_PTE_CONT; - - arm_smmu_flush_pgtable(smmu, cont_start, - sizeof(*pte) * - ARM_SMMU_PTE_CONT_ENTRIES); - } - - do { - *pte = pfn_pte(pfn, __pgprot(pteval)); - } while (pte++, pfn++, addr += PAGE_SIZE, --i); - } while (addr != end); - - arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start)); - return 0; -} - -static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, - unsigned long addr, unsigned long end, - phys_addr_t phys, int prot, int stage) +static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) { int ret; - pmd_t *pmd; - unsigned long next, pfn = __phys_to_pfn(phys); - -#ifndef __PAGETABLE_PMD_FOLDED - if (pud_none(*pud)) { - pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); - if (!pmd) - return -ENOMEM; - - arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE); - pud_populate(NULL, pud, pmd); - arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); - - pmd += pmd_index(addr); - } else -#endif - pmd = pmd_offset(pud, addr); + unsigned long flags; + struct arm_smmu_domain *smmu_domain = domain->priv; + struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; - do { - next = pmd_addr_end(addr, end); - ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn, - prot, stage); - phys += next - addr; - pfn = __phys_to_pfn(phys); - } while (pmd++, addr = next, addr < end); + if (!ops) + return -ENODEV; + spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); + ret = ops->map(ops, iova, paddr, size, prot); + spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); return ret; } -static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, - unsigned long addr, unsigned long end, - phys_addr_t phys, int prot, int stage) +static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size) { - int ret = 0; - pud_t *pud; - unsigned long next; - -#ifndef __PAGETABLE_PUD_FOLDED - if (pgd_none(*pgd)) { - pud = (pud_t *)get_zeroed_page(GFP_ATOMIC); - if (!pud) - return -ENOMEM; - - arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE); - pgd_populate(NULL, pgd, pud); - arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); - - pud += pud_index(addr); - } else -#endif - pud = pud_offset(pgd, addr); + size_t ret; + unsigned long flags; + struct arm_smmu_domain *smmu_domain = domain->priv; + struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; - do { - next = pud_addr_end(addr, end); - ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, - prot, stage); - phys += next - addr; - } while (pud++, addr = next, addr < end); + if (!ops) + return 0; + spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); + ret = ops->unmap(ops, iova, size); + spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); return ret; } -static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, - unsigned long iova, phys_addr_t paddr, - size_t size, int prot) +static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, + dma_addr_t iova) { - int ret, stage; - unsigned long end; - phys_addr_t input_mask, output_mask; + 
struct arm_smmu_domain *smmu_domain = domain->priv; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - pgd_t *pgd = cfg->pgd; - unsigned long flags; + struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct device *dev = smmu->dev; + void __iomem *cb_base; + u32 tmp; + u64 phys; - if (cfg->cbar == CBAR_TYPE_S2_TRANS) { - stage = 2; - input_mask = (1ULL << smmu->s2_input_size) - 1; - output_mask = (1ULL << smmu->s2_output_size) - 1; + cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); + + if (smmu->version == 1) { + u32 reg = iova & ~0xfff; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); } else { - stage = 1; - input_mask = (1ULL << smmu->s1_input_size) - 1; - output_mask = (1ULL << smmu->s1_output_size) - 1; + u32 reg = iova & ~0xfff; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); + reg = (iova & ~0xfff) >> 32; + writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI); } - if (!pgd) - return -EINVAL; - - if (size & ~PAGE_MASK) - return -EINVAL; - - if ((phys_addr_t)iova & ~input_mask) - return -ERANGE; - - if (paddr & ~output_mask) - return -ERANGE; - - spin_lock_irqsave(&smmu_domain->lock, flags); - pgd += pgd_index(iova); - end = iova + size; - do { - unsigned long next = pgd_addr_end(iova, end); - - ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, - prot, stage); - if (ret) - goto out_unlock; - - paddr += next - iova; - iova = next; - } while (pgd++, iova != end); - -out_unlock: - spin_unlock_irqrestore(&smmu_domain->lock, flags); - - return ret; -} - -static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) -{ - struct arm_smmu_domain *smmu_domain = domain->priv; - - if (!smmu_domain) - return -ENODEV; + if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, + !(tmp & ATSR_ACTIVE), 5, 50)) { + dev_err(dev, + "iova to phys timed out on 0x%pad. Falling back to software table walk.\n", + &iova); + return ops->iova_to_phys(ops, iova); + } - return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); -} + phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO); + phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32; -static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) -{ - int ret; - struct arm_smmu_domain *smmu_domain = domain->priv; + if (phys & CB_PAR_F) { + dev_err(dev, "translation fault!\n"); + dev_err(dev, "PAR = 0x%llx\n", phys); + return 0; + } - ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); - arm_smmu_tlb_inv_context(smmu_domain); - return ret ? 
0 : size; + return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff); } static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, - dma_addr_t iova) + dma_addr_t iova) { - pgd_t *pgdp, pgd; - pud_t pud; - pmd_t pmd; - pte_t pte; + phys_addr_t ret; + unsigned long flags; struct arm_smmu_domain *smmu_domain = domain->priv; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; - pgdp = cfg->pgd; - if (!pgdp) + if (!ops) return 0; - pgd = *(pgdp + pgd_index(iova)); - if (pgd_none(pgd)) - return 0; - - pud = *pud_offset(&pgd, iova); - if (pud_none(pud)) - return 0; - - pmd = *pmd_offset(&pud, iova); - if (pmd_none(pmd)) - return 0; + spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); + if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS) + ret = arm_smmu_iova_to_phys_hard(domain, iova); + else + ret = ops->iova_to_phys(ops, iova); + spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); - if (pte_none(pte)) - return 0; - - return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); + return ret; } static bool arm_smmu_capable(enum iommu_cap cap) @@ -1698,24 +1403,34 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, static int arm_smmu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) { + int ret = 0; struct arm_smmu_domain *smmu_domain = domain->priv; + mutex_lock(&smmu_domain->init_mutex); + switch (attr) { case DOMAIN_ATTR_NESTING: - if (smmu_domain->smmu) - return -EPERM; + if (smmu_domain->smmu) { + ret = -EPERM; + goto out_unlock; + } + if (*(int *)data) smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; else smmu_domain->stage = ARM_SMMU_DOMAIN_S1; - return 0; + break; default: - return -ENODEV; + ret = -ENODEV; } + +out_unlock: + mutex_unlock(&smmu_domain->init_mutex); + return ret; } -static const struct iommu_ops arm_smmu_ops = { +static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_init = arm_smmu_domain_init, .domain_destroy = arm_smmu_domain_destroy, @@ -1729,9 +1444,7 @@ static const struct iommu_ops arm_smmu_ops = { .remove_device = arm_smmu_remove_device, .domain_get_attr = arm_smmu_domain_get_attr, .domain_set_attr = arm_smmu_domain_set_attr, - .pgsize_bitmap = (SECTION_SIZE | - ARM_SMMU_PTE_CONT_SIZE | - PAGE_SIZE), + .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; static void arm_smmu_device_reset(struct arm_smmu_device *smmu) @@ -1760,7 +1473,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) } /* Invalidate the TLB, just in case */ - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); @@ -1782,7 +1494,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); /* Push the button */ - arm_smmu_tlb_sync(smmu); + __arm_smmu_tlb_sync(smmu); writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); } @@ -1816,12 +1528,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) /* ID0 */ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); -#ifndef CONFIG_64BIT - if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) { - dev_err(smmu->dev, "\tno v7 descriptor support!\n"); - return -ENODEV; - } -#endif /* Restrict available stages based on module parameter */ if (force_stage == 1) @@ -1850,6 +1556,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) return -ENODEV; } + if 
(smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) { + smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; + dev_notice(smmu->dev, "\taddress translation ops\n"); + } + if (id & ID0_CTTW) { smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; dev_notice(smmu->dev, "\tcoherent table walk\n"); @@ -1894,16 +1605,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; /* Check for size mismatch of SMMU address space from mapped region */ - size = 1 << - (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); + size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); size *= 2 << smmu->pgshift; if (smmu->size != size) dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", size, smmu->size); - smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & - ID1_NUMS2CB_MASK; + smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; if (smmu->num_s2_context_banks > smmu->num_context_banks) { dev_err(smmu->dev, "impossible number of S2 context banks!\n"); @@ -1915,46 +1624,40 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) /* ID2 */ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); - smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); - - /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */ -#ifdef CONFIG_64BIT - smmu->s2_input_size = min_t(unsigned long, VA_BITS, size); -#else - smmu->s2_input_size = min(32UL, size); -#endif + smmu->ipa_size = size; - /* The stage-2 output mask is also applied for bypass */ + /* The output mask is also applied for bypass */ size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); - smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); + smmu->pa_size = size; if (smmu->version == ARM_SMMU_V1) { - smmu->s1_input_size = 32; + smmu->va_size = smmu->ipa_size; + size = SZ_4K | SZ_2M | SZ_1G; } else { -#ifdef CONFIG_64BIT size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; - size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); -#else - size = 32; + smmu->va_size = arm_smmu_id_size_to_bits(size); +#ifndef CONFIG_64BIT + smmu->va_size = min(32UL, smmu->va_size); #endif - smmu->s1_input_size = size; - - if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || - (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || - (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { - dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", - PAGE_SIZE); - return -ENODEV; - } + size = 0; + if (id & ID2_PTFS_4K) + size |= SZ_4K | SZ_2M | SZ_1G; + if (id & ID2_PTFS_16K) + size |= SZ_16K | SZ_32M; + if (id & ID2_PTFS_64K) + size |= SZ_64K | SZ_512M; } + arm_smmu_ops.pgsize_bitmap &= size; + dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size); + if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", - smmu->s1_input_size, smmu->s1_output_size); + smmu->va_size, smmu->ipa_size); if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", - smmu->s2_input_size, smmu->s2_output_size); + smmu->ipa_size, smmu->pa_size); return 0; } diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c new file mode 100644 index 000000000000..5a500edf00cc --- /dev/null +++ b/drivers/iommu/io-pgtable-arm.c @@ -0,0 +1,986 @@ +/* + * CPU-agnostic ARM 
page table allocator. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * Copyright (C) 2014 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt + +#include <linux/iommu.h> +#include <linux/kernel.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include "io-pgtable.h" + +#define ARM_LPAE_MAX_ADDR_BITS 48 +#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 +#define ARM_LPAE_MAX_LEVELS 4 + +/* Struct accessors */ +#define io_pgtable_to_data(x) \ + container_of((x), struct arm_lpae_io_pgtable, iop) + +#define io_pgtable_ops_to_pgtable(x) \ + container_of((x), struct io_pgtable, ops) + +#define io_pgtable_ops_to_data(x) \ + io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) + +/* + * For consistency with the architecture, we always consider + * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 + */ +#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) + +/* + * Calculate the right shift amount to get to the portion describing level l + * in a virtual address mapped by the pagetable in d. + */ +#define ARM_LPAE_LVL_SHIFT(l,d) \ + ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ + * (d)->bits_per_level) + (d)->pg_shift) + +#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) + +/* + * Calculate the index at level l used to map virtual address a using the + * pagetable in d. + */ +#define ARM_LPAE_PGD_IDX(l,d) \ + ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) + +#define ARM_LPAE_LVL_IDX(a,l,d) \ + (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ + ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) + +/* Calculate the block/page mapping size at level l for pagetable in d. 
*/ +#define ARM_LPAE_BLOCK_SIZE(l,d) \ + (1 << (ilog2(sizeof(arm_lpae_iopte)) + \ + ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) + +/* Page table bits */ +#define ARM_LPAE_PTE_TYPE_SHIFT 0 +#define ARM_LPAE_PTE_TYPE_MASK 0x3 + +#define ARM_LPAE_PTE_TYPE_BLOCK 1 +#define ARM_LPAE_PTE_TYPE_TABLE 3 +#define ARM_LPAE_PTE_TYPE_PAGE 3 + +#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) +#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) +#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) +#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) +#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) +#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8) +#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5) +#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0) + +#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) +/* Ignore the contiguous bit for block splitting */ +#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) +#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ + ARM_LPAE_PTE_ATTR_HI_MASK) + +/* Stage-1 PTE */ +#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) +#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) +#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 +#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) + +/* Stage-2 PTE */ +#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6) +#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6) +#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6) +#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2) +#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2) +#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) + +/* Register bits */ +#define ARM_32_LPAE_TCR_EAE (1 << 31) +#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) + +#define ARM_LPAE_TCR_TG0_4K (0 << 14) +#define ARM_LPAE_TCR_TG0_64K (1 << 14) +#define ARM_LPAE_TCR_TG0_16K (2 << 14) + +#define ARM_LPAE_TCR_SH0_SHIFT 12 +#define ARM_LPAE_TCR_SH0_MASK 0x3 +#define ARM_LPAE_TCR_SH_NS 0 +#define ARM_LPAE_TCR_SH_OS 2 +#define ARM_LPAE_TCR_SH_IS 3 + +#define ARM_LPAE_TCR_ORGN0_SHIFT 10 +#define ARM_LPAE_TCR_IRGN0_SHIFT 8 +#define ARM_LPAE_TCR_RGN_MASK 0x3 +#define ARM_LPAE_TCR_RGN_NC 0 +#define ARM_LPAE_TCR_RGN_WBWA 1 +#define ARM_LPAE_TCR_RGN_WT 2 +#define ARM_LPAE_TCR_RGN_WB 3 + +#define ARM_LPAE_TCR_SL0_SHIFT 6 +#define ARM_LPAE_TCR_SL0_MASK 0x3 + +#define ARM_LPAE_TCR_T0SZ_SHIFT 0 +#define ARM_LPAE_TCR_SZ_MASK 0xf + +#define ARM_LPAE_TCR_PS_SHIFT 16 +#define ARM_LPAE_TCR_PS_MASK 0x7 + +#define ARM_LPAE_TCR_IPS_SHIFT 32 +#define ARM_LPAE_TCR_IPS_MASK 0x7 + +#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL +#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL +#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL +#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL +#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL +#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL + +#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) +#define ARM_LPAE_MAIR_ATTR_MASK 0xff +#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 +#define ARM_LPAE_MAIR_ATTR_NC 0x44 +#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff +#define ARM_LPAE_MAIR_ATTR_IDX_NC 0 +#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 +#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 + +/* IOPTE accessors */ +#define iopte_deref(pte,d) \ + (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ + & ~((1ULL << (d)->pg_shift) - 1))) + +#define iopte_type(pte,l) \ + (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) + +#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) + +#define iopte_leaf(pte,l) \ + (l == (ARM_LPAE_MAX_LEVELS - 1) ? 
\ + (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \ + (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK)) + +#define iopte_to_pfn(pte,d) \ + (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) + +#define pfn_to_iopte(pfn,d) \ + (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) + +struct arm_lpae_io_pgtable { + struct io_pgtable iop; + + int levels; + size_t pgd_size; + unsigned long pg_shift; + unsigned long bits_per_level; + + void *pgd; +}; + +typedef u64 arm_lpae_iopte; + +static bool selftest_running = false; + +static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, + unsigned long iova, phys_addr_t paddr, + arm_lpae_iopte prot, int lvl, + arm_lpae_iopte *ptep) +{ + arm_lpae_iopte pte = prot; + + /* We require an unmap first */ + if (iopte_leaf(*ptep, lvl)) { + WARN_ON(!selftest_running); + return -EEXIST; + } + + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) + pte |= ARM_LPAE_PTE_NS; + + if (lvl == ARM_LPAE_MAX_LEVELS - 1) + pte |= ARM_LPAE_PTE_TYPE_PAGE; + else + pte |= ARM_LPAE_PTE_TYPE_BLOCK; + + pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; + pte |= pfn_to_iopte(paddr >> data->pg_shift, data); + + *ptep = pte; + data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie); + return 0; +} + +static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, + phys_addr_t paddr, size_t size, arm_lpae_iopte prot, + int lvl, arm_lpae_iopte *ptep) +{ + arm_lpae_iopte *cptep, pte; + void *cookie = data->iop.cookie; + size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); + + /* Find our entry at the current level */ + ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); + + /* If we can install a leaf entry at this level, then do so */ + if (size == block_size && (size & data->iop.cfg.pgsize_bitmap)) + return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); + + /* We can't allocate tables at the final level */ + if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) + return -EINVAL; + + /* Grab a pointer to the next level */ + pte = *ptep; + if (!pte) { + cptep = alloc_pages_exact(1UL << data->pg_shift, + GFP_ATOMIC | __GFP_ZERO); + if (!cptep) + return -ENOMEM; + + data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift, + cookie); + pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) + pte |= ARM_LPAE_PTE_NSTABLE; + *ptep = pte; + data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); + } else { + cptep = iopte_deref(pte, data); + } + + /* Rinse, repeat */ + return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); +} + +static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, + int prot) +{ + arm_lpae_iopte pte; + + if (data->iop.fmt == ARM_64_LPAE_S1 || + data->iop.fmt == ARM_32_LPAE_S1) { + pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; + + if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) + pte |= ARM_LPAE_PTE_AP_RDONLY; + + if (prot & IOMMU_CACHE) + pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE + << ARM_LPAE_PTE_ATTRINDX_SHIFT); + } else { + pte = ARM_LPAE_PTE_HAP_FAULT; + if (prot & IOMMU_READ) + pte |= ARM_LPAE_PTE_HAP_READ; + if (prot & IOMMU_WRITE) + pte |= ARM_LPAE_PTE_HAP_WRITE; + if (prot & IOMMU_CACHE) + pte |= ARM_LPAE_PTE_MEMATTR_OIWB; + else + pte |= ARM_LPAE_PTE_MEMATTR_NC; + } + + if (prot & IOMMU_NOEXEC) + pte |= ARM_LPAE_PTE_XN; + + return pte; +} + +static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int iommu_prot) +{ + struct arm_lpae_io_pgtable *data = 
io_pgtable_ops_to_data(ops); + arm_lpae_iopte *ptep = data->pgd; + int lvl = ARM_LPAE_START_LVL(data); + arm_lpae_iopte prot; + + /* If no access, then nothing to do */ + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) + return 0; + + prot = arm_lpae_prot_to_pte(data, iommu_prot); + return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); +} + +static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, + arm_lpae_iopte *ptep) +{ + arm_lpae_iopte *start, *end; + unsigned long table_size; + + /* Only leaf entries at the last level */ + if (lvl == ARM_LPAE_MAX_LEVELS - 1) + return; + + if (lvl == ARM_LPAE_START_LVL(data)) + table_size = data->pgd_size; + else + table_size = 1UL << data->pg_shift; + + start = ptep; + end = (void *)ptep + table_size; + + while (ptep != end) { + arm_lpae_iopte pte = *ptep++; + + if (!pte || iopte_leaf(pte, lvl)) + continue; + + __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); + } + + free_pages_exact(start, table_size); +} + +static void arm_lpae_free_pgtable(struct io_pgtable *iop) +{ + struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); + + __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); + kfree(data); +} + +static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, + unsigned long iova, size_t size, + arm_lpae_iopte prot, int lvl, + arm_lpae_iopte *ptep, size_t blk_size) +{ + unsigned long blk_start, blk_end; + phys_addr_t blk_paddr; + arm_lpae_iopte table = 0; + void *cookie = data->iop.cookie; + const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; + + blk_start = iova & ~(blk_size - 1); + blk_end = blk_start + blk_size; + blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; + + for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { + arm_lpae_iopte *tablep; + + /* Unmap! 
*/ + if (blk_start == iova) + continue; + + /* __arm_lpae_map expects a pointer to the start of the table */ + tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); + if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, + tablep) < 0) { + if (table) { + /* Free the table we allocated */ + tablep = iopte_deref(table, data); + __arm_lpae_free_pgtable(data, lvl + 1, tablep); + } + return 0; /* Bytes unmapped */ + } + } + + *ptep = table; + tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); + iova &= ~(blk_size - 1); + tlb->tlb_add_flush(iova, blk_size, true, cookie); + return size; +} + +static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, + unsigned long iova, size_t size, int lvl, + arm_lpae_iopte *ptep) +{ + arm_lpae_iopte pte; + const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; + void *cookie = data->iop.cookie; + size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); + + ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); + pte = *ptep; + + /* Something went horribly wrong and we ran out of page table */ + if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS))) + return 0; + + /* If the size matches this level, we're in the right place */ + if (size == blk_size) { + *ptep = 0; + tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); + + if (!iopte_leaf(pte, lvl)) { + /* Also flush any partial walks */ + tlb->tlb_add_flush(iova, size, false, cookie); + tlb->tlb_sync(data->iop.cookie); + ptep = iopte_deref(pte, data); + __arm_lpae_free_pgtable(data, lvl + 1, ptep); + } else { + tlb->tlb_add_flush(iova, size, true, cookie); + } + + return size; + } else if (iopte_leaf(pte, lvl)) { + /* + * Insert a table at the next level to map the old region, + * minus the part we want to unmap + */ + return arm_lpae_split_blk_unmap(data, iova, size, + iopte_prot(pte), lvl, ptep, + blk_size); + } + + /* Keep on walkin' */ + ptep = iopte_deref(pte, data); + return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); +} + +static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, + size_t size) +{ + size_t unmapped; + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable *iop = &data->iop; + arm_lpae_iopte *ptep = data->pgd; + int lvl = ARM_LPAE_START_LVL(data); + + unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); + if (unmapped) + iop->cfg.tlb->tlb_sync(iop->cookie); + + return unmapped; +} + +static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, + unsigned long iova) +{ + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + arm_lpae_iopte pte, *ptep = data->pgd; + int lvl = ARM_LPAE_START_LVL(data); + + do { + /* Valid IOPTE pointer? */ + if (!ptep) + return 0; + + /* Grab the IOPTE we're interested in */ + pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); + + /* Valid entry? */ + if (!pte) + return 0; + + /* Leaf entry? */ + if (iopte_leaf(pte,lvl)) + goto found_translation; + + /* Take it to the next level */ + ptep = iopte_deref(pte, data); + } while (++lvl < ARM_LPAE_MAX_LEVELS); + + /* Ran out of page tables to walk */ + return 0; + +found_translation: + iova &= ((1 << data->pg_shift) - 1); + return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; +} + +static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) +{ + unsigned long granule; + + /* + * We need to restrict the supported page sizes to match the + * translation regime for a particular granule. Aim to match + * the CPU page size if possible, otherwise prefer smaller sizes. 
+ * While we're at it, restrict the block sizes to match the + * chosen granule. + */ + if (cfg->pgsize_bitmap & PAGE_SIZE) + granule = PAGE_SIZE; + else if (cfg->pgsize_bitmap & ~PAGE_MASK) + granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); + else if (cfg->pgsize_bitmap & PAGE_MASK) + granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); + else + granule = 0; + + switch (granule) { + case SZ_4K: + cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); + break; + case SZ_16K: + cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); + break; + case SZ_64K: + cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); + break; + default: + cfg->pgsize_bitmap = 0; + } +} + +static struct arm_lpae_io_pgtable * +arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) +{ + unsigned long va_bits, pgd_bits; + struct arm_lpae_io_pgtable *data; + + arm_lpae_restrict_pgsizes(cfg); + + if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K))) + return NULL; + + if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS) + return NULL; + + if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) + return NULL; + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return NULL; + + data->pg_shift = __ffs(cfg->pgsize_bitmap); + data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); + + va_bits = cfg->ias - data->pg_shift; + data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); + + /* Calculate the actual size of our pgd (without concatenation) */ + pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); + data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); + + data->iop.ops = (struct io_pgtable_ops) { + .map = arm_lpae_map, + .unmap = arm_lpae_unmap, + .iova_to_phys = arm_lpae_iova_to_phys, + }; + + return data; +} + +static struct io_pgtable * +arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) +{ + u64 reg; + struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); + + if (!data) + return NULL; + + /* TCR */ + reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + + switch (1 << data->pg_shift) { + case SZ_4K: + reg |= ARM_LPAE_TCR_TG0_4K; + break; + case SZ_16K: + reg |= ARM_LPAE_TCR_TG0_16K; + break; + case SZ_64K: + reg |= ARM_LPAE_TCR_TG0_64K; + break; + } + + switch (cfg->oas) { + case 32: + reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 36: + reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 40: + reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 42: + reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 44: + reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + case 48: + reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); + break; + default: + goto out_free_data; + } + + reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; + cfg->arm_lpae_s1_cfg.tcr = reg; + + /* MAIRs */ + reg = (ARM_LPAE_MAIR_ATTR_NC + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | + (ARM_LPAE_MAIR_ATTR_WBRWA + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | + (ARM_LPAE_MAIR_ATTR_DEVICE + << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); + + cfg->arm_lpae_s1_cfg.mair[0] = reg; + cfg->arm_lpae_s1_cfg.mair[1] = 0; + + /* Looking good; allocate a pgd */ + data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); + if (!data->pgd) + goto out_free_data; + + cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); + + /* TTBRs */ + 
cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd); + cfg->arm_lpae_s1_cfg.ttbr[1] = 0; + return &data->iop; + +out_free_data: + kfree(data); + return NULL; +} + +static struct io_pgtable * +arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) +{ + u64 reg, sl; + struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); + + if (!data) + return NULL; + + /* + * Concatenate PGDs at level 1 if possible in order to reduce + * the depth of the stage-2 walk. + */ + if (data->levels == ARM_LPAE_MAX_LEVELS) { + unsigned long pgd_pages; + + pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); + if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { + data->pgd_size = pgd_pages << data->pg_shift; + data->levels--; + } + } + + /* VTCR */ + reg = ARM_64_LPAE_S2_TCR_RES1 | + (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + + sl = ARM_LPAE_START_LVL(data); + + switch (1 << data->pg_shift) { + case SZ_4K: + reg |= ARM_LPAE_TCR_TG0_4K; + sl++; /* SL0 format is different for 4K granule size */ + break; + case SZ_16K: + reg |= ARM_LPAE_TCR_TG0_16K; + break; + case SZ_64K: + reg |= ARM_LPAE_TCR_TG0_64K; + break; + } + + switch (cfg->oas) { + case 32: + reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT); + break; + case 36: + reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT); + break; + case 40: + reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT); + break; + case 42: + reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT); + break; + case 44: + reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT); + break; + case 48: + reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT); + break; + default: + goto out_free_data; + } + + reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; + reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT; + cfg->arm_lpae_s2_cfg.vtcr = reg; + + /* Allocate pgd pages */ + data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); + if (!data->pgd) + goto out_free_data; + + cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); + + /* VTTBR */ + cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd); + return &data->iop; + +out_free_data: + kfree(data); + return NULL; +} + +static struct io_pgtable * +arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) +{ + struct io_pgtable *iop; + + if (cfg->ias > 32 || cfg->oas > 40) + return NULL; + + cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); + iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); + if (iop) { + cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; + cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; + } + + return iop; +} + +static struct io_pgtable * +arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) +{ + struct io_pgtable *iop; + + if (cfg->ias > 40 || cfg->oas > 40) + return NULL; + + cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); + iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie); + if (iop) + cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff; + + return iop; +} + +struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { + .alloc = arm_64_lpae_alloc_pgtable_s1, + .free = arm_lpae_free_pgtable, +}; + +struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = { + .alloc = arm_64_lpae_alloc_pgtable_s2, + .free = arm_lpae_free_pgtable, +}; + +struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = { + .alloc = arm_32_lpae_alloc_pgtable_s1, + .free = arm_lpae_free_pgtable, +}; + +struct 
io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = { + .alloc = arm_32_lpae_alloc_pgtable_s2, + .free = arm_lpae_free_pgtable, +}; + +#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST + +static struct io_pgtable_cfg *cfg_cookie; + +static void dummy_tlb_flush_all(void *cookie) +{ + WARN_ON(cookie != cfg_cookie); +} + +static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf, + void *cookie) +{ + WARN_ON(cookie != cfg_cookie); + WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); +} + +static void dummy_tlb_sync(void *cookie) +{ + WARN_ON(cookie != cfg_cookie); +} + +static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie) +{ + WARN_ON(cookie != cfg_cookie); +} + +static struct iommu_gather_ops dummy_tlb_ops __initdata = { + .tlb_flush_all = dummy_tlb_flush_all, + .tlb_add_flush = dummy_tlb_add_flush, + .tlb_sync = dummy_tlb_sync, + .flush_pgtable = dummy_flush_pgtable, +}; + +static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) +{ + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &data->iop.cfg; + + pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", + cfg->pgsize_bitmap, cfg->ias); + pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", + data->levels, data->pgd_size, data->pg_shift, + data->bits_per_level, data->pgd); +} + +#define __FAIL(ops, i) ({ \ + WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ + arm_lpae_dump_ops(ops); \ + selftest_running = false; \ + -EFAULT; \ +}) + +static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) +{ + static const enum io_pgtable_fmt fmts[] = { + ARM_64_LPAE_S1, + ARM_64_LPAE_S2, + }; + + int i, j; + unsigned long iova; + size_t size; + struct io_pgtable_ops *ops; + + selftest_running = true; + + for (i = 0; i < ARRAY_SIZE(fmts); ++i) { + cfg_cookie = cfg; + ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); + if (!ops) { + pr_err("selftest: failed to allocate io pgtable ops\n"); + return -ENOMEM; + } + + /* + * Initial sanity checks. + * Empty page tables shouldn't provide any translations. + */ + if (ops->iova_to_phys(ops, 42)) + return __FAIL(ops, i); + + if (ops->iova_to_phys(ops, SZ_1G + 42)) + return __FAIL(ops, i); + + if (ops->iova_to_phys(ops, SZ_2G + 42)) + return __FAIL(ops, i); + + /* + * Distinct mappings of different granule sizes. 
+ */ + iova = 0; + j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); + while (j != BITS_PER_LONG) { + size = 1UL << j; + + if (ops->map(ops, iova, iova, size, IOMMU_READ | + IOMMU_WRITE | + IOMMU_NOEXEC | + IOMMU_CACHE)) + return __FAIL(ops, i); + + /* Overlapping mappings */ + if (!ops->map(ops, iova, iova + size, size, + IOMMU_READ | IOMMU_NOEXEC)) + return __FAIL(ops, i); + + if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) + return __FAIL(ops, i); + + iova += SZ_1G; + j++; + j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); + } + + /* Partial unmap */ + size = 1UL << __ffs(cfg->pgsize_bitmap); + if (ops->unmap(ops, SZ_1G + size, size) != size) + return __FAIL(ops, i); + + /* Remap of partial unmap */ + if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) + return __FAIL(ops, i); + + if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) + return __FAIL(ops, i); + + /* Full unmap */ + iova = 0; + j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); + while (j != BITS_PER_LONG) { + size = 1UL << j; + + if (ops->unmap(ops, iova, size) != size) + return __FAIL(ops, i); + + if (ops->iova_to_phys(ops, iova + 42)) + return __FAIL(ops, i); + + /* Remap full block */ + if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) + return __FAIL(ops, i); + + if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) + return __FAIL(ops, i); + + iova += SZ_1G; + j++; + j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); + } + + free_io_pgtable_ops(ops); + } + + selftest_running = false; + return 0; +} + +static int __init arm_lpae_do_selftests(void) +{ + static const unsigned long pgsize[] = { + SZ_4K | SZ_2M | SZ_1G, + SZ_16K | SZ_32M, + SZ_64K | SZ_512M, + }; + + static const unsigned int ias[] = { + 32, 36, 40, 42, 44, 48, + }; + + int i, j, pass = 0, fail = 0; + struct io_pgtable_cfg cfg = { + .tlb = &dummy_tlb_ops, + .oas = 48, + }; + + for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { + for (j = 0; j < ARRAY_SIZE(ias); ++j) { + cfg.pgsize_bitmap = pgsize[i]; + cfg.ias = ias[j]; + pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n", + pgsize[i], ias[j]); + if (arm_lpae_run_tests(&cfg)) + fail++; + else + pass++; + } + } + + pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); + return fail ? -EFAULT : 0; +} +subsys_initcall(arm_lpae_do_selftests); +#endif diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c new file mode 100644 index 000000000000..6436fe24bc2f --- /dev/null +++ b/drivers/iommu/io-pgtable.c @@ -0,0 +1,82 @@ +/* + * Generic page table allocator for IOMMUs. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + * Copyright (C) 2014 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#include <linux/bug.h> +#include <linux/kernel.h> +#include <linux/types.h> + +#include "io-pgtable.h" + +extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns; +extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns; +extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; +extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; + +static const struct io_pgtable_init_fns * +io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = +{ +#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE + [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns, + [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns, + [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, + [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, +#endif +}; + +struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, + struct io_pgtable_cfg *cfg, + void *cookie) +{ + struct io_pgtable *iop; + const struct io_pgtable_init_fns *fns; + + if (fmt >= IO_PGTABLE_NUM_FMTS) + return NULL; + + fns = io_pgtable_init_table[fmt]; + if (!fns) + return NULL; + + iop = fns->alloc(cfg, cookie); + if (!iop) + return NULL; + + iop->fmt = fmt; + iop->cookie = cookie; + iop->cfg = *cfg; + + return &iop->ops; +} + +/* + * It is the IOMMU driver's responsibility to ensure that the page table + * is no longer accessible to the walker by this point. + */ +void free_io_pgtable_ops(struct io_pgtable_ops *ops) +{ + struct io_pgtable *iop; + + if (!ops) + return; + + iop = container_of(ops, struct io_pgtable, ops); + iop->cfg.tlb->tlb_flush_all(iop->cookie); + io_pgtable_init_table[iop->fmt]->free(iop); +} diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h new file mode 100644 index 000000000000..10e32f69c668 --- /dev/null +++ b/drivers/iommu/io-pgtable.h @@ -0,0 +1,143 @@ +#ifndef __IO_PGTABLE_H +#define __IO_PGTABLE_H + +/* + * Public API for use by IOMMU drivers + */ +enum io_pgtable_fmt { + ARM_32_LPAE_S1, + ARM_32_LPAE_S2, + ARM_64_LPAE_S1, + ARM_64_LPAE_S2, + IO_PGTABLE_NUM_FMTS, +}; + +/** + * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. + * + * @tlb_flush_all: Synchronously invalidate the entire TLB context. + * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. + * @tlb_sync: Ensure any queued TLB invalidation has taken effect. + * @flush_pgtable: Ensure page table updates are visible to the IOMMU. + * + * Note that these can all be called in atomic context and must therefore + * not block. + */ +struct iommu_gather_ops { + void (*tlb_flush_all)(void *cookie); + void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf, + void *cookie); + void (*tlb_sync)(void *cookie); + void (*flush_pgtable)(void *ptr, size_t size, void *cookie); +}; + +/** + * struct io_pgtable_cfg - Configuration data for a set of page tables. + * + * @quirks: A bitmap of hardware quirks that require some special + * action by the low-level page table allocator. + * @pgsize_bitmap: A bitmap of page sizes supported by this set of page + * tables. + * @ias: Input address (iova) size, in bits. + * @oas: Output address (paddr) size, in bits. + * @tlb: TLB management callbacks for this set of tables. 
+ */ +struct io_pgtable_cfg { + #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */ + int quirks; + unsigned long pgsize_bitmap; + unsigned int ias; + unsigned int oas; + const struct iommu_gather_ops *tlb; + + /* Low-level data specific to the table format */ + union { + struct { + u64 ttbr[2]; + u64 tcr; + u64 mair[2]; + } arm_lpae_s1_cfg; + + struct { + u64 vttbr; + u64 vtcr; + } arm_lpae_s2_cfg; + }; +}; + +/** + * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. + * + * @map: Map a physically contiguous memory region. + * @unmap: Unmap a physically contiguous memory region. + * @iova_to_phys: Translate iova to physical address. + * + * These functions map directly onto the iommu_ops member functions with + * the same names. + */ +struct io_pgtable_ops { + int (*map)(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, + size_t size); + phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, + unsigned long iova); +}; + +/** + * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU. + * + * @fmt: The page table format. + * @cfg: The page table configuration. This will be modified to represent + * the configuration actually provided by the allocator (e.g. the + * pgsize_bitmap may be restricted). + * @cookie: An opaque token provided by the IOMMU driver and passed back to + * the callback routines in cfg->tlb. + */ +struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, + struct io_pgtable_cfg *cfg, + void *cookie); + +/** + * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller + * *must* ensure that the page table is no longer + * live, but the TLB can be dirty. + * + * @ops: The ops returned from alloc_io_pgtable_ops. + */ +void free_io_pgtable_ops(struct io_pgtable_ops *ops); + + +/* + * Internal structures for page table allocator implementations. + */ + +/** + * struct io_pgtable - Internal structure describing a set of page tables. + * + * @fmt: The page table format. + * @cookie: An opaque token provided by the IOMMU driver and passed back to + * any callback routines. + * @cfg: A copy of the page table configuration. + * @ops: The page table operations in use for this set of page tables. + */ +struct io_pgtable { + enum io_pgtable_fmt fmt; + void *cookie; + struct io_pgtable_cfg cfg; + struct io_pgtable_ops ops; +}; + +/** + * struct io_pgtable_init_fns - Alloc/free a set of page tables for a + * particular format. + * + * @alloc: Allocate a set of page tables described by cfg. + * @free: Free the page tables associated with iop. 
+ */ +struct io_pgtable_init_fns { + struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie); + void (*free)(struct io_pgtable *iop); +}; + +#endif /* __IO_PGTABLE_H */ diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index d111ac779c40..63cd031b2c28 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -28,7 +28,7 @@ #define AT91_AIC_IRQ_MIN_PRIORITY 0 #define AT91_AIC_IRQ_MAX_PRIORITY 7 -#define AT91_AIC_SRCTYPE GENMASK(7, 6) +#define AT91_AIC_SRCTYPE GENMASK(6, 5) #define AT91_AIC_SRCTYPE_LOW (0 << 5) #define AT91_AIC_SRCTYPE_FALLING (1 << 5) #define AT91_AIC_SRCTYPE_HIGH (2 << 5) @@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val) return -EINVAL; } - *val &= AT91_AIC_SRCTYPE; + *val &= ~AT91_AIC_SRCTYPE; *val |= aic_type; return 0; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 86e4684adeb1..d8996bdf0f61 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, * of two entries. No, the architecture doesn't let you * express an ITT with a single entry. */ - nr_ites = max(2, roundup_pow_of_two(nvecs)); + nr_ites = max(2UL, roundup_pow_of_two(nvecs)); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kmalloc(sz, GFP_KERNEL); diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 29b8f21b74d0..6bc2deb73d53 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -381,7 +381,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent) * It will be refined as each CPU probes its ID. */ for (i = 0; i < NR_HIP04_CPU_IF; i++) - hip04_cpu_map[i] = 0xff; + hip04_cpu_map[i] = 0xffff; /* * Find out how many interrupts are supported. 
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 7e342df6a62f..0b0d2c00a2df 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -137,9 +137,9 @@ static int __init mtk_sysirq_of_init(struct device_node *node, return -ENOMEM; chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol"); - if (!chip_data->intpol_base) { + if (IS_ERR(chip_data->intpol_base)) { pr_err("mtk_sysirq: unable to map sysirq register\n"); - ret = -ENOMEM; + ret = PTR_ERR(chip_data->intpol_base); goto out_free; } diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c index 28718d3e8281..c03f140acbae 100644 --- a/drivers/irqchip/irq-omap-intc.c +++ b/drivers/irqchip/irq-omap-intc.c @@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node) return ret; } -static int __init omap_init_irq_legacy(u32 base) +static int __init omap_init_irq_legacy(u32 base, struct device_node *node) { int j, irq_base; @@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base) irq_base = 0; } - domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0, + domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0, &irq_domain_simple_ops, NULL); omap_irq_soft_reset(); @@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node) { int ret; - if (node) + /* + * FIXME: the legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c + * is still not ready for linear IRQ domains; because of that + * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using + * linear IRQ domains until that driver is finally fixed. + */ + if (of_device_is_compatible(node, "ti,omap2-intc") || + of_device_is_compatible(node, "ti,omap3-intc")) { + struct resource res; + + if (of_address_to_resource(node, 0, &res)) + return -ENOMEM; + + base = res.start; + ret = omap_init_irq_legacy(base, node); + } else if (node) { ret = omap_init_irq_of(node); - else - ret = omap_init_irq_legacy(base); + } else { + ret = omap_init_irq_legacy(base, NULL); + } if (ret == 0) omap_irq_enable_protection(); diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 9fc616c2755e..21b156242e42 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -94,6 +94,9 @@ struct cache_disk_superblock { } __packed; struct dm_cache_metadata { + atomic_t ref_count; + struct list_head list; + struct block_device *bdev; struct dm_block_manager *bm; struct dm_space_map *metadata_sm; @@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags) /*----------------------------------------------------------------*/ -struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, - sector_t data_block_size, - bool may_format_device, - size_t policy_hint_size) +static struct dm_cache_metadata *metadata_open(struct block_device *bdev, + sector_t data_block_size, + bool may_format_device, + size_t policy_hint_size) { + int r; + struct dm_cache_metadata *cmd; @@ -683,6 +686,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, return NULL; } + atomic_set(&cmd->ref_count, 1); init_rwsem(&cmd->root_lock); cmd->bdev = bdev; cmd->data_block_size = data_block_size; @@ -705,10 +709,95 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, return cmd; } +/* + * We keep a little list of ref-counted metadata objects to prevent two + * different target instances creating separate bufio 
instances. This is + * an issue if a table is reloaded before the suspend. + */ +static DEFINE_MUTEX(table_lock); +static LIST_HEAD(table); + +static struct dm_cache_metadata *lookup(struct block_device *bdev) +{ + struct dm_cache_metadata *cmd; + + list_for_each_entry(cmd, &table, list) + if (cmd->bdev == bdev) { + atomic_inc(&cmd->ref_count); + return cmd; + } + + return NULL; +} + +static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, + sector_t data_block_size, + bool may_format_device, + size_t policy_hint_size) +{ + struct dm_cache_metadata *cmd, *cmd2; + + mutex_lock(&table_lock); + cmd = lookup(bdev); + mutex_unlock(&table_lock); + + if (cmd) + return cmd; + + cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); + if (cmd) { + mutex_lock(&table_lock); + cmd2 = lookup(bdev); + if (cmd2) { + mutex_unlock(&table_lock); + __destroy_persistent_data_objects(cmd); + kfree(cmd); + return cmd2; + } + list_add(&cmd->list, &table); + mutex_unlock(&table_lock); + } + + return cmd; +} + +static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size) +{ + if (cmd->data_block_size != data_block_size) { + DMERR("data_block_size (%llu) different from that in metadata (%llu)\n", + (unsigned long long) data_block_size, + (unsigned long long) cmd->data_block_size); + return false; + } + + return true; +} + +struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, + sector_t data_block_size, + bool may_format_device, + size_t policy_hint_size) +{ + struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, + may_format_device, policy_hint_size); + if (cmd && !same_params(cmd, data_block_size)) { + dm_cache_metadata_close(cmd); + return NULL; + } + + return cmd; +} + void dm_cache_metadata_close(struct dm_cache_metadata *cmd) { - __destroy_persistent_data_objects(cmd); - kfree(cmd); + if (atomic_dec_and_test(&cmd->ref_count)) { + mutex_lock(&table_lock); + list_del(&cmd->list); + mutex_unlock(&table_lock); + + __destroy_persistent_data_objects(cmd); + kfree(cmd); + } } /* diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1e96d7889f51..e1650539cc2f 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -221,7 +221,13 @@ struct cache { struct list_head need_commit_migrations; sector_t migration_threshold; wait_queue_head_t migration_wait; - atomic_t nr_migrations; + atomic_t nr_allocated_migrations; + + /* + * The number of in-flight migrations that are performing + * background I/O, e.g. promotion and writeback. 
+ */ + atomic_t nr_io_migrations; wait_queue_head_t quiescing_wait; atomic_t quiescing; @@ -258,7 +264,6 @@ struct cache { struct dm_deferred_set *all_io_ds; mempool_t *migration_pool; - struct dm_cache_migration *next_migration; struct dm_cache_policy *policy; unsigned policy_nr_args; @@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel dm_bio_prison_free_cell(cache->prison, cell); } +static struct dm_cache_migration *alloc_migration(struct cache *cache) +{ + struct dm_cache_migration *mg; + + mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); + if (mg) { + mg->cache = cache; + atomic_inc(&mg->cache->nr_allocated_migrations); + } + + return mg; +} + +static void free_migration(struct dm_cache_migration *mg) +{ + if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations)) + wake_up(&mg->cache->migration_wait); + + mempool_free(mg, mg->cache->migration_pool); +} + static int prealloc_data_structs(struct cache *cache, struct prealloc *p) { if (!p->mg) { - p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); + p->mg = alloc_migration(cache); if (!p->mg) return -ENOMEM; } @@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p) free_prison_cell(cache, p->cell1); if (p->mg) - mempool_free(p->mg, cache->migration_pool); + free_migration(p->mg); } static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) @@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, * Migration covers moving data from the origin device to the cache, or * vice versa. *--------------------------------------------------------------*/ -static void free_migration(struct dm_cache_migration *mg) -{ - mempool_free(mg, mg->cache->migration_pool); -} - -static void inc_nr_migrations(struct cache *cache) +static void inc_io_migrations(struct cache *cache) { - atomic_inc(&cache->nr_migrations); + atomic_inc(&cache->nr_io_migrations); } -static void dec_nr_migrations(struct cache *cache) +static void dec_io_migrations(struct cache *cache) { - atomic_dec(&cache->nr_migrations); - - /* - * Wake the worker in case we're suspending the target. 
- */ - wake_up(&cache->migration_wait); + atomic_dec(&cache->nr_io_migrations); } static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, @@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, wake_worker(cache); } -static void cleanup_migration(struct dm_cache_migration *mg) +static void free_io_migration(struct dm_cache_migration *mg) { - struct cache *cache = mg->cache; + dec_io_migrations(mg->cache); free_migration(mg); - dec_nr_migrations(cache); } static void migration_failure(struct dm_cache_migration *mg) @@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg) cell_defer(cache, mg->new_ocell, true); } - cleanup_migration(mg); + free_io_migration(mg); } static void migration_success_pre_commit(struct dm_cache_migration *mg) @@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) if (mg->writeback) { clear_dirty(cache, mg->old_oblock, mg->cblock); cell_defer(cache, mg->old_ocell, false); - cleanup_migration(mg); + free_io_migration(mg); return; } else if (mg->demote) { @@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) mg->old_oblock); if (mg->promote) cell_defer(cache, mg->new_ocell, true); - cleanup_migration(mg); + free_io_migration(mg); return; } } else { if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); policy_remove_mapping(cache->policy, mg->new_oblock); - cleanup_migration(mg); + free_io_migration(mg); return; } } @@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) } else { if (mg->invalidate) policy_remove_mapping(cache->policy, mg->old_oblock); - cleanup_migration(mg); + free_io_migration(mg); } } else { @@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) bio_endio(mg->new_ocell->holder, 0); cell_defer(cache, mg->new_ocell, false); } - cleanup_migration(mg); + free_io_migration(mg); } } @@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs, mg->new_ocell = cell; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs, mg->new_ocell = NULL; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs, mg->new_ocell = new_ocell; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs, mg->new_ocell = NULL; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs, static bool spare_migration_bandwidth(struct cache *cache) { - sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) * + sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * cache->sectors_per_block; return current_volume < cache->migration_threshold; } @@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache) static void wait_for_migrations(struct cache *cache) { - wait_event(cache->migration_wait, 
!atomic_read(&cache->nr_migrations)); + wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations)); } static void stop_worker(struct cache *cache) @@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache) { unsigned i; - if (cache->next_migration) - mempool_free(cache->next_migration, cache->migration_pool); - if (cache->migration_pool) mempool_destroy(cache->migration_pool); @@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) INIT_LIST_HEAD(&cache->quiesced_migrations); INIT_LIST_HEAD(&cache->completed_migrations); INIT_LIST_HEAD(&cache->need_commit_migrations); - atomic_set(&cache->nr_migrations, 0); + atomic_set(&cache->nr_allocated_migrations, 0); + atomic_set(&cache->nr_io_migrations, 0); init_waitqueue_head(&cache->migration_wait); init_waitqueue_head(&cache->quiescing_wait); @@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) goto bad; } - cache->next_migration = NULL; - cache->need_tick_bio = true; cache->sized = false; cache->invalidate = false; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b98cd9d84435..2caf5b374649 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -206,6 +206,9 @@ struct mapped_device { /* zero-length flush that will be cloned and submitted to targets */ struct bio flush_bio; + /* the number of internal suspends */ + unsigned internal_suspend_count; + struct dm_stats stats; }; @@ -2928,7 +2931,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla { struct dm_table *map = NULL; - if (dm_suspended_internally_md(md)) + if (md->internal_suspend_count++) return; /* nested internal suspend */ if (dm_suspended_md(md)) { @@ -2953,7 +2956,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla static void __dm_internal_resume(struct mapped_device *md) { - if (!dm_suspended_internally_md(md)) + BUG_ON(!md->internal_suspend_count); + + if (--md->internal_suspend_count) return; /* resume from nested internal suspend */ if (dm_suspended_md(md)) diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index db99ca2613ba..06931f6fa26c 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = { .portb = CX23885_MPEG_DVB, }, [CX23885_BOARD_HAUPPAUGE_HVR4400] = { - .name = "Hauppauge WinTV-HVR4400", + .name = "Hauppauge WinTV-HVR4400/HVR5500", .porta = CX23885_ANALOG_VIDEO, .portb = CX23885_MPEG_DVB, .portc = CX23885_MPEG_DVB, @@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = { .tuner_addr = 0x60, /* 0xc0 >> 1 */ .tuner_bus = 1, }, + [CX23885_BOARD_HAUPPAUGE_STARBURST] = { + .name = "Hauppauge WinTV Starburst", + .portb = CX23885_MPEG_DVB, + }, [CX23885_BOARD_AVERMEDIA_HC81R] = { .name = "AVerTV Hybrid Express Slim HC81R", .tuner_type = TUNER_XC2028, @@ -936,19 +940,19 @@ struct cx23885_subid cx23885_subids[] = { }, { .subvendor = 0x0070, .subdevice = 0xc108, - .card = CX23885_BOARD_HAUPPAUGE_HVR4400, + .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */ }, { .subvendor = 0x0070, .subdevice = 0xc138, - .card = CX23885_BOARD_HAUPPAUGE_HVR4400, + .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */ }, { .subvendor = 0x0070, .subdevice = 0xc12a, - .card = CX23885_BOARD_HAUPPAUGE_HVR4400, + .card = CX23885_BOARD_HAUPPAUGE_STARBURST, /* 
Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */ }, { .subvendor = 0x0070, .subdevice = 0xc1f8, - .card = CX23885_BOARD_HAUPPAUGE_HVR4400, + .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */ }, { .subvendor = 0x1461, .subdevice = 0xd939, @@ -1545,8 +1549,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev) cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/ break; case CX23885_BOARD_HAUPPAUGE_HVR4400: + case CX23885_BOARD_HAUPPAUGE_STARBURST: /* GPIO-8 tda10071 demod reset */ - /* GPIO-9 si2165 demod reset */ + /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500)*/ /* Put the parts into reset and back */ cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1); @@ -1872,6 +1877,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_HAUPPAUGE_HVR1850: case CX23885_BOARD_HAUPPAUGE_HVR1290: case CX23885_BOARD_HAUPPAUGE_HVR4400: + case CX23885_BOARD_HAUPPAUGE_STARBURST: case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE: if (dev->i2c_bus[0].i2c_rc == 0) hauppauge_eeprom(dev, eeprom+0xc0); @@ -1980,6 +1986,11 @@ void cx23885_card_setup(struct cx23885_dev *dev) ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; break; + case CX23885_BOARD_HAUPPAUGE_STARBURST: + ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ + ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */ + ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; + break; case CX23885_BOARD_DVBSKY_T9580: case CX23885_BOARD_DVBSKY_T982: ts1->gen_ctrl_val = 0x5; /* Parallel */ diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 1d9d0f86ca8c..1ad49946d7fa 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev) cx23885_shutdown(dev); - pci_disable_device(pci_dev); - /* unregister stuff */ free_irq(pci_dev->irq, dev); + pci_disable_device(pci_dev); + cx23885_dev_unregister(dev); vb2_dma_sg_cleanup_ctx(dev->alloc_ctx); v4l2_ctrl_handler_free(&dev->ctrl_handler); diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c index c47d18270cfc..a9c450d4b54e 100644 --- a/drivers/media/pci/cx23885/cx23885-dvb.c +++ b/drivers/media/pci/cx23885/cx23885-dvb.c @@ -1710,6 +1710,17 @@ static int dvb_register(struct cx23885_tsport *port) break; } break; + case CX23885_BOARD_HAUPPAUGE_STARBURST: + i2c_bus = &dev->i2c_bus[0]; + fe0->dvb.frontend = dvb_attach(tda10071_attach, + &hauppauge_tda10071_config, + &i2c_bus->i2c_adap); + if (fe0->dvb.frontend != NULL) { + dvb_attach(a8293_attach, fe0->dvb.frontend, + &i2c_bus->i2c_adap, + &hauppauge_a8293_config); + } + break; case CX23885_BOARD_DVBSKY_T9580: case CX23885_BOARD_DVBSKY_S950: i2c_bus = &dev->i2c_bus[0]; diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index f55cd12da0fd..36f2f96c40e4 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h @@ -99,6 +99,7 @@ #define CX23885_BOARD_DVBSKY_S950 49 #define CX23885_BOARD_DVBSKY_S952 50 #define CX23885_BOARD_DVBSKY_T982 51 +#define CX23885_BOARD_HAUPPAUGE_STARBURST 52 #define GPIO_0 0x00000001 #define GPIO_1 0x00000002 diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index b463fe172d16..3fe9047ef466 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -602,10 
+602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap) strlcpy(cap->card, video->video.name, sizeof(cap->card)); strlcpy(cap->bus_info, "media", sizeof(cap->bus_info)); + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT + | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS; + if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; else - cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; return 0; } diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c index 8efe40337608..6d885239b16a 100644 --- a/drivers/media/platform/soc_camera/atmel-isi.c +++ b/drivers/media/platform/soc_camera/atmel-isi.c @@ -760,8 +760,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici, { strcpy(cap->driver, "atmel-isi"); strcpy(cap->card, "Atmel Image Sensor Interface"); - cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE | - V4L2_CAP_STREAMING); + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; } diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c index ce72bd26a6ac..192377f55840 100644 --- a/drivers/media/platform/soc_camera/mx2_camera.c +++ b/drivers/media/platform/soc_camera/mx2_camera.c @@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici, { /* cap->name is set by the friendly caller:-> */ strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c index a60c3bb0e4cc..0b3299dee05d 100644 --- a/drivers/media/platform/soc_camera/mx3_camera.c +++ b/drivers/media/platform/soc_camera/mx3_camera.c @@ -967,7 +967,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici, { /* cap->name is set by the friendly caller:-> */ strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c index e6b93281f246..16f65ecb70a3 100644 --- a/drivers/media/platform/soc_camera/omap1_camera.c +++ b/drivers/media/platform/soc_camera/omap1_camera.c @@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici, { /* cap->name is set by the friendly caller:-> */ strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c index 951226af0eba..8d6e343fec0f 100644 --- a/drivers/media/platform/soc_camera/pxa_camera.c +++ b/drivers/media/platform/soc_camera/pxa_camera.c @@ -1576,7 +1576,8 @@ static int 
pxa_camera_querycap(struct soc_camera_host *ici, { /* cap->name is set by the friendly caller:-> */ strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c index 0c1f55648106..9f1473c0a0cf 100644 --- a/drivers/media/platform/soc_camera/rcar_vin.c +++ b/drivers/media/platform/soc_camera/rcar_vin.c @@ -1799,7 +1799,9 @@ static int rcar_vin_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; } diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 8b27b3eb2b25..71787702d4a2 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c @@ -1652,7 +1652,9 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; } diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 0f345b1f9014..f327c49d7e09 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = { { "Mygica T230 DVB-T/T2/C", { NULL }, - { &cxusb_table[22], NULL }, + { &cxusb_table[20], NULL }, }, } }; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index 1b158f1167ed..536210b39428 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... 
PVR_NUM-1] = -1}; module_param_array(vbi_nr, int, NULL, 0444); MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor"); -static struct v4l2_capability pvr_capability ={ - .driver = "pvrusb2", - .card = "Hauppauge WinTV pvr-usb2", - .bus_info = "usb", - .version = LINUX_VERSION_CODE, - .capabilities = (V4L2_CAP_VIDEO_CAPTURE | - V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | - V4L2_CAP_READWRITE), -}; - static struct v4l2_fmtdesc pvr_fmtdesc [] = { { .index = 0, @@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability * struct pvr2_v4l2_fh *fh = file->private_data; struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; - memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability)); + strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver)); strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw), sizeof(cap->bus_info)); strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card)); + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | + V4L2_CAP_AUDIO | V4L2_CAP_RADIO | + V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS; + switch (fh->pdi->devbase.vfl_type) { + case VFL_TYPE_GRABBER: + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO; + break; + case VFL_TYPE_RADIO: + cap->device_caps = V4L2_CAP_RADIO; + break; + } + cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE; return 0; } diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index d09a8916e940..bc08a829bc13 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -3146,27 +3146,26 @@ static int vb2_thread(void *data) prequeue--; } else { call_void_qop(q, wait_finish, q); - ret = vb2_internal_dqbuf(q, &fileio->b, 0); + if (!threadio->stop) + ret = vb2_internal_dqbuf(q, &fileio->b, 0); call_void_qop(q, wait_prepare, q); dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); } - if (threadio->stop) - break; - if (ret) + if (ret || threadio->stop) break; try_to_freeze(); vb = q->bufs[fileio->b.index]; if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR)) - ret = threadio->fnc(vb, threadio->priv); - if (ret) - break; + if (threadio->fnc(vb, threadio->priv)) + break; call_void_qop(q, wait_finish, q); if (set_timestamp) v4l2_get_timestamp(&fileio->b.timestamp); - ret = vb2_internal_qbuf(q, &fileio->b); + if (!threadio->stop) + ret = vb2_internal_qbuf(q, &fileio->b); call_void_qop(q, wait_prepare, q); - if (ret) + if (ret || threadio->stop) break; } @@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q) threadio->stop = true; vb2_internal_streamoff(q, q->type); call_void_qop(q, wait_prepare, q); + err = kthread_stop(threadio->thread); q->fileio = NULL; fileio->req.count = 0; vb2_reqbufs(q, &fileio->req); kfree(fileio); - err = kthread_stop(threadio->thread); threadio->thread = NULL; kfree(threadio); q->fileio = NULL; diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c index 52a0c2f6264f..ae498b53ee40 100644 --- a/drivers/mfd/da9052-core.c +++ b/drivers/mfd/da9052-core.c @@ -554,7 +554,8 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id) return ret; } - ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info, + ret = mfd_add_devices(da9052->dev, PLATFORM_DEVID_AUTO, + da9052_subdev_info, ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL); if (ret) { dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret); diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index dbdd0faeb6ce..210d1f85679e 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c @@ -681,21 
+681,9 @@ static void rtsx_usb_disconnect(struct usb_interface *intf) #ifdef CONFIG_PM static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) { - struct rtsx_ucr *ucr = - (struct rtsx_ucr *)usb_get_intfdata(intf); - dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n", __func__, message.event); - /* - * Call to make sure LED is off during suspend to save more power. - * It is NOT a permanent state and could be turned on anytime later. - * Thus no need to call turn_on when resuming. - */ - mutex_lock(&ucr->dev_mutex); - rtsx_usb_turn_off_led(ucr); - mutex_unlock(&ucr->dev_mutex); - return 0; } diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index 0d256cb002eb..d6b764349f9d 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -125,10 +125,21 @@ int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg, } EXPORT_SYMBOL_GPL(tps65218_clear_bits); +static const struct regmap_range tps65218_yes_ranges[] = { + regmap_reg_range(TPS65218_REG_INT1, TPS65218_REG_INT2), + regmap_reg_range(TPS65218_REG_STATUS, TPS65218_REG_STATUS), +}; + +static const struct regmap_access_table tps65218_volatile_table = { + .yes_ranges = tps65218_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges), +}; + static struct regmap_config tps65218_regmap_config = { .reg_bits = 8, .val_bits = 8, .cache_type = REGCACHE_RBTREE, + .volatile_table = &tps65218_volatile_table, }; static const struct regmap_irq tps65218_irqs[] = { @@ -193,6 +204,7 @@ static struct regmap_irq_chip tps65218_irq_chip = { .num_regs = 2, .mask_base = TPS65218_REG_INT_MASK1, + .status_base = TPS65218_REG_INT1, }; static const struct of_device_id of_tps65218_match_table[] = { diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index f363972cd77d..e36d10520e24 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -103,27 +103,34 @@ static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable) mask = 1 << raminit->bits.start | 1 << raminit->bits.done; regmap_read(raminit->syscon, raminit->reg, &ctrl); - /* We clear the done and start bit first. The start bit is + /* We clear the start bit first. The start bit is * looking at the 0 -> 1 transition, but is not self clearing; - * And we clear the init done bit as well. * NOTE: DONE must be written with 1 to clear it. + * We can't clear the DONE bit here using regmap_update_bits() + * as it will bypass the write if initial condition is START:0 DONE:1 + * e.g. on DRA7 which needs START pulse. */ - ctrl &= ~(1 << raminit->bits.start); - ctrl |= 1 << raminit->bits.done; - regmap_write(raminit->syscon, raminit->reg, ctrl); + ctrl &= ~mask; /* START = 0, DONE = 0 */ + regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl); - ctrl &= ~(1 << raminit->bits.done); - c_can_hw_raminit_wait_syscon(priv, mask, ctrl); + /* check if START bit is 0. Ignore DONE bit for now + * as it can be either 0 or 1. + */ + c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl); if (enable) { - /* Set start bit and wait for the done bit. */ + /* Clear DONE bit & set START bit. 
*/ ctrl |= 1 << raminit->bits.start; - regmap_write(raminit->syscon, raminit->reg, ctrl); - + /* DONE must be written with 1 to clear it */ + ctrl |= 1 << raminit->bits.done; + regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl); + /* prevent further clearing of DONE bit */ + ctrl &= ~(1 << raminit->bits.done); /* clear START bit if start pulse is needed */ if (raminit->needs_pulse) { ctrl &= ~(1 << raminit->bits.start); - regmap_write(raminit->syscon, raminit->reg, ctrl); + regmap_update_bits(raminit->syscon, raminit->reg, + mask, ctrl); } ctrl |= 1 << raminit->bits.done; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3ec8f6f25e5f..847c1f813261 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -807,10 +807,14 @@ static int can_changelink(struct net_device *dev, if (dev->flags & IFF_UP) return -EBUSY; cm = nla_data(data[IFLA_CAN_CTRLMODE]); - if (cm->flags & ~priv->ctrlmode_supported) + + /* check whether changed bits are allowed to be modified */ + if (cm->mask & ~priv->ctrlmode_supported) return -EOPNOTSUPP; + + /* clear bits to be modified and copy the flag values */ priv->ctrlmode &= ~cm->mask; - priv->ctrlmode |= cm->flags; + priv->ctrlmode |= (cm->flags & cm->mask); /* CAN_CTRLMODE_FD can only be set when driver supports FD */ if (priv->ctrlmode & CAN_CTRLMODE_FD) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index d7bc462aafdc..244529881be9 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -955,6 +955,11 @@ static struct net_device *alloc_m_can_dev(void) priv->can.data_bittiming_const = &m_can_data_bittiming_const; priv->can.do_set_mode = m_can_set_mode; priv->can.do_get_berr_counter = m_can_get_berr_counter; + + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */ + priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO; + + /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 541fb7a05625..c32cd61073bc 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -520,10 +520,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, skb = alloc_can_err_skb(priv->netdev, &cf); if (skb) { cf->can_id |= CAN_ERR_RESTARTED; - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } else { netdev_err(priv->netdev, "No memory left for err_skb\n"); @@ -770,10 +770,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, priv->can.state = new_state; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, @@ -805,10 +804,9 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, stats->rx_over_errors++; stats->rx_errors++; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } } @@ -887,10 +885,9 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, cf->can_dlc); } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, @@ -1246,6 +1243,9 @@ static int kvaser_usb_close(struct net_device *netdev) if (err) netdev_warn(netdev, "Cannot stop device, error %d\n", err); + /* reset tx contexts */ + 
kvaser_usb_unlink_tx_urbs(priv); + priv->can.state = CAN_STATE_STOPPED; close_candev(priv->netdev); @@ -1294,12 +1294,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (!urb) { netdev_err(netdev, "No memory left for URBs\n"); stats->tx_dropped++; - goto nourbmem; + dev_kfree_skb(skb); + return NETDEV_TX_OK; } buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); if (!buf) { stats->tx_dropped++; + dev_kfree_skb(skb); goto nobufmem; } @@ -1334,6 +1336,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, } } + /* This should never happen; it implies a flow control bug */ if (!context) { netdev_warn(netdev, "cannot find free context\n"); ret = NETDEV_TX_BUSY; @@ -1364,9 +1367,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (unlikely(err)) { can_free_echo_skb(netdev, context->echo_index); - skb = NULL; /* set to NULL to avoid double free in - * dev_kfree_skb(skb) */ - atomic_dec(&priv->active_tx_urbs); usb_unanchor_urb(urb); @@ -1388,8 +1388,6 @@ releasebuf: kfree(buf); nobufmem: usb_free_urb(urb); -nourbmem: - dev_kfree_skb(skb); return ret; } @@ -1502,6 +1500,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf, struct kvaser_usb_net_priv *priv; int i, err; + err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); + if (err) + return err; + netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "Cannot alloc candev\n"); @@ -1606,9 +1608,6 @@ static int kvaser_usb_probe(struct usb_interface *intf, usb_set_intfdata(intf, dev); - for (i = 0; i < MAX_NET_DEVICES; i++) - kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i); - err = kvaser_usb_get_software_info(dev); if (err) { dev_err(&intf->dev, diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 05c6af6c418f..3007d95fbb9f 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1167,10 +1167,10 @@ static int bgmac_poll(struct napi_struct *napi, int weight) bgmac->int_status = 0; } - if (handled < weight) + if (handled < weight) { napi_complete(napi); - - bgmac_chip_intrs_on(bgmac); + bgmac_chip_intrs_on(bgmac); + } return handled; } @@ -1515,6 +1515,8 @@ static int bgmac_probe(struct bcma_device *core) if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); + netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); + err = bgmac_mii_register(bgmac); if (err) { bgmac_err(bgmac, "Cannot register MDIO\n"); @@ -1529,8 +1531,6 @@ static int bgmac_probe(struct bcma_device *core) netif_carrier_off(net_dev); - netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); - return 0; err_mii_unregister: @@ -1549,9 +1549,9 @@ static void bgmac_remove(struct bcma_device *core) { struct bgmac *bgmac = bcma_get_drvdata(core); - netif_napi_del(&bgmac->napi); unregister_netdev(bgmac->net_dev); bgmac_mii_unregister(bgmac); + netif_napi_del(&bgmac->napi); bgmac_dma_free(bgmac); bcma_set_drvdata(core, NULL); free_netdev(bgmac->net_dev); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 41a0a5498da7..d48806b5cd88 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4383,8 +4383,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, * distinguish various types of transports (VxLAN, GRE, NVGRE ..). 
So, offload * is expected to work across all types of IP tunnels once exported. Skyhawk * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN - * offloads in hw_enc_features only when a VxLAN port is added. Note this only - * ensures that other tunnels work fine while VxLAN offloads are not enabled. + * offloads in hw_enc_features only when a VxLAN port is added. If other (non + * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for + * those other tunnels are unexported on the fly through ndo_features_check(). * * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack * adds more than one port, disable offloads and don't re-enable them again @@ -4463,7 +4464,41 @@ static netdev_features_t be_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { - return vxlan_features_check(skb, features); + struct be_adapter *adapter = netdev_priv(dev); + u8 l4_hdr = 0; + + /* The code below restricts offload features for some tunneled packets. + * Offload features for normal (non tunnel) packets are unchanged. + */ + if (!skb->encapsulation || + !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) + return features; + + /* It's an encapsulated packet and VxLAN offloads are enabled. We + * should disable tunnel offload features if it's not a VxLAN packet, + * as tunnel offloads have been enabled only for VxLAN. This is done to + * allow other tunneled traffic like GRE work fine while VxLAN + * offloads are configured in Skyhawk-R. + */ + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + if (l4_hdr != IPPROTO_UDP || + skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr)) + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + + return features; } #endif diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d0d6dc1b8e46..ac6a8f1eea6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -475,7 +475,8 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad { int err; - if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || + priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) return 0; /* do nothing */ err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 03e9eb0dc761..6e08352ec994 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1744,8 +1744,7 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && - dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS && - dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; else dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; diff --git 
a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index f5e4b820128b..db0c7a9aee60 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -6987,7 +6987,9 @@ static int s2io_add_isr(struct s2io_nic *sp) if (sp->s2io_entries[i].in_use == MSIX_FLG) { if (sp->s2io_entries[i].type == MSIX_RING_TYPE) { - sprintf(sp->desc[i], "%s:MSI-X-%d-RX", + snprintf(sp->desc[i], + sizeof(sp->desc[i]), + "%s:MSI-X-%d-RX", dev->name, i); err = request_irq(sp->entries[i].vector, s2io_msix_ring_handle, @@ -6996,7 +6998,9 @@ static int s2io_add_isr(struct s2io_nic *sp) sp->s2io_entries[i].arg); } else if (sp->s2io_entries[i].type == MSIX_ALARM_TYPE) { - sprintf(sp->desc[i], "%s:MSI-X-%d-TX", + snprintf(sp->desc[i], + sizeof(sp->desc[i]), + "%s:MSI-X-%d-TX", dev->name, i); err = request_irq(sp->entries[i].vector, s2io_msix_fifo_handle, @@ -8154,7 +8158,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) "%s: UDP Fragmentation Offload(UFO) enabled\n", dev->name); /* Initialize device name */ - sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); + snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name, + sp->product_name); if (vlan_tag_strip) sp->vlan_strip_flag = 1; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 37583a9d8853..6576243222af 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -498,6 +498,8 @@ static struct sh_eth_cpu_data r8a779x_data = { EESR_ECI, .fdr_value = 0x00000f0f, + .trscer_err_mask = DESC_I_RINT8, + .apr = 1, .mpr = 1, .tpauser = 1, @@ -538,8 +540,6 @@ static struct sh_eth_cpu_data sh7724_data = { EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, - .trscer_err_mask = DESC_I_RINT8, - .apr = 1, .mpr = 1, .tpauser = 1, @@ -1827,6 +1827,9 @@ static int sh_eth_get_settings(struct net_device *ndev, unsigned long flags; int ret; + if (!mdp->phydev) + return -ENODEV; + spin_lock_irqsave(&mdp->lock, flags); ret = phy_ethtool_gset(mdp->phydev, ecmd); spin_unlock_irqrestore(&mdp->lock, flags); @@ -1841,6 +1844,9 @@ static int sh_eth_set_settings(struct net_device *ndev, unsigned long flags; int ret; + if (!mdp->phydev) + return -ENODEV; + spin_lock_irqsave(&mdp->lock, flags); /* disable tx and rx */ @@ -1875,6 +1881,9 @@ static int sh_eth_nway_reset(struct net_device *ndev) unsigned long flags; int ret; + if (!mdp->phydev) + return -ENODEV; + spin_lock_irqsave(&mdp->lock, flags); ret = phy_start_aneg(mdp->phydev); spin_unlock_irqrestore(&mdp->lock, flags); @@ -2184,6 +2193,7 @@ static int sh_eth_close(struct net_device *ndev) if (mdp->phydev) { phy_stop(mdp->phydev); phy_disconnect(mdp->phydev); + mdp->phydev = NULL; } free_irq(ndev->irq, ndev); @@ -2417,7 +2427,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev) struct sh_eth_private *mdp = netdev_priv(ndev); int i, ret; - if (unlikely(!mdp->cd->tsu)) + if (!mdp->cd->tsu) return 0; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { @@ -2440,7 +2450,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev) void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int i; - if (unlikely(!mdp->cd->tsu)) + if (!mdp->cd->tsu) return; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { @@ -2450,8 +2460,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev) } } -/* Multicast reception directions set */ -static void sh_eth_set_multicast_list(struct net_device *ndev) +/* Update promiscuous flag and multicast 
filter */ +static void sh_eth_set_rx_mode(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); u32 ecmr_bits; @@ -2462,7 +2472,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev) /* Initial condition is MCT = 1, PRM = 0. * Depending on ndev->flags, set PRM or clear MCT */ - ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT; + ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM; + if (mdp->cd->tsu) + ecmr_bits |= ECMR_MCT; if (!(ndev->flags & IFF_MULTICAST)) { sh_eth_tsu_purge_mcast(ndev); @@ -2491,9 +2503,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev) } } } - } else { - /* Normal, unicast/broadcast-only mode. */ - ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT; } /* update the ethernet mode */ @@ -2701,6 +2710,7 @@ static const struct net_device_ops sh_eth_netdev_ops = { .ndo_stop = sh_eth_close, .ndo_start_xmit = sh_eth_start_xmit, .ndo_get_stats = sh_eth_get_stats, + .ndo_set_rx_mode = sh_eth_set_rx_mode, .ndo_tx_timeout = sh_eth_tx_timeout, .ndo_do_ioctl = sh_eth_do_ioctl, .ndo_validate_addr = eth_validate_addr, @@ -2713,7 +2723,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = { .ndo_stop = sh_eth_close, .ndo_start_xmit = sh_eth_start_xmit, .ndo_get_stats = sh_eth_get_stats, - .ndo_set_rx_mode = sh_eth_set_multicast_list, + .ndo_set_rx_mode = sh_eth_set_rx_mode, .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, .ndo_tx_timeout = sh_eth_tx_timeout, diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 698494481d18..b1a271853d85 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -474,13 +474,19 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, /* allocate memory for RX skbuff array */ rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, sizeof(dma_addr_t), GFP_KERNEL); - if (rx_ring->rx_skbuff_dma == NULL) - goto dmamem_err; + if (!rx_ring->rx_skbuff_dma) { + dma_free_coherent(priv->device, + rx_rsize * sizeof(struct sxgbe_rx_norm_desc), + rx_ring->dma_rx, rx_ring->dma_rx_phy); + goto error; + } rx_ring->rx_skbuff = kmalloc_array(rx_rsize, sizeof(struct sk_buff *), GFP_KERNEL); - if (rx_ring->rx_skbuff == NULL) - goto rxbuff_err; + if (!rx_ring->rx_skbuff) { + kfree(rx_ring->rx_skbuff_dma); + goto error; + } /* initialise the buffers */ for (desc_index = 0; desc_index < rx_rsize; desc_index++) { @@ -502,13 +508,6 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, err_init_rx_buffers: while (--desc_index >= 0) free_rx_ring(priv->device, rx_ring, desc_index); - kfree(rx_ring->rx_skbuff); -rxbuff_err: - kfree(rx_ring->rx_skbuff_dma); -dmamem_err: - dma_free_coherent(priv->device, - rx_rsize * sizeof(struct sxgbe_rx_norm_desc), - rx_ring->dma_rx, rx_ring->dma_rx_phy); error: return -ENOMEM; } diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 866560ea9e18..b02eed12bfc5 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -108,10 +108,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev) } } - /* Get MAC address if available (DT) */ - if (mac) - ether_addr_copy(priv->dev->dev_addr, mac); - priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr); if (!priv) { pr_err("%s: main driver probe failed\n", __func__); @@ -125,6 +121,10 @@ static int 
sxgbe_platform_probe(struct platform_device *pdev) goto err_drv_remove; } + /* Get MAC address if available (DT) */ + if (mac) + ether_addr_copy(priv->dev->dev_addr, mac); + /* Get the TX/RX IRQ numbers */ for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 64d1cef4cda1..e068d48b0f21 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1634,16 +1634,24 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, unsigned short vid) { int ret; - int unreg_mcast_mask; + int unreg_mcast_mask = 0; + u32 port_mask; - if (priv->ndev->flags & IFF_ALLMULTI) - unreg_mcast_mask = ALE_ALL_PORTS; - else - unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; + if (priv->data.dual_emac) { + port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST; + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = port_mask; + } else { + port_mask = ALE_ALL_PORTS; + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; + } - ret = cpsw_ale_add_vlan(priv->ale, vid, - ALE_ALL_PORTS << priv->host_port, - 0, ALE_ALL_PORTS << priv->host_port, + ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask, unreg_mcast_mask << priv->host_port); if (ret != 0) return ret; @@ -1654,8 +1662,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, goto clean_vid; ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - ALE_ALL_PORTS << priv->host_port, - ALE_VLAN, vid, 0); + port_mask, ALE_VLAN, vid, 0); if (ret != 0) goto clean_vlan_ucast; return 0; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ea712512c7d1..5fae4354722c 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -62,6 +62,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_mdio.h> #include <linux/of_irq.h> #include <linux/of_net.h> @@ -343,9 +344,7 @@ struct emac_priv { u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; u32 rx_addr_type; const char *phy_id; -#ifdef CONFIG_OF struct device_node *phy_node; -#endif struct phy_device *phydev; spinlock_t lock; /*platform specific members*/ @@ -922,6 +921,16 @@ static void emac_int_disable(struct emac_priv *priv) if (priv->int_disable) priv->int_disable(); + /* NOTE: Rx Threshold and Misc interrupts are not enabled */ + + /* ack rxen only then a new pulse will be generated */ + emac_write(EMAC_DM646X_MACEOIVECTOR, + EMAC_DM646X_MAC_EOI_C0_RXEN); + + /* ack txen- only then a new pulse will be generated */ + emac_write(EMAC_DM646X_MACEOIVECTOR, + EMAC_DM646X_MAC_EOI_C0_TXEN); + local_irq_restore(flags); } else { @@ -951,15 +960,6 @@ static void emac_int_enable(struct emac_priv *priv) * register */ /* NOTE: Rx Threshold and Misc interrupts are not enabled */ - - /* ack rxen only then a new pulse will be generated */ - emac_write(EMAC_DM646X_MACEOIVECTOR, - EMAC_DM646X_MAC_EOI_C0_RXEN); - - /* ack txen- only then a new pulse will be generated */ - emac_write(EMAC_DM646X_MACEOIVECTOR, - EMAC_DM646X_MAC_EOI_C0_TXEN); - } else { /* Set DM644x control registers for interrupt control */ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1); @@ -1537,7 +1537,13 @@ static int emac_dev_open(struct net_device *ndev) int i = 0; struct emac_priv *priv = netdev_priv(ndev); - pm_runtime_get(&priv->pdev->dev); + ret = 
pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", + __func__, ret); + return ret; + } netif_carrier_off(ndev); for (cnt = 0; cnt < ETH_ALEN; cnt++) @@ -1596,8 +1602,20 @@ static int emac_dev_open(struct net_device *ndev) cpdma_ctlr_start(priv->dma); priv->phydev = NULL; + + if (priv->phy_node) { + priv->phydev = of_phy_connect(ndev, priv->phy_node, + &emac_adjust_link, 0, 0); + if (!priv->phydev) { + dev_err(emac_dev, "could not connect to phy %s\n", + priv->phy_node->full_name); + ret = -ENODEV; + goto err; + } + } + /* use the first phy on the bus if pdata did not give us a phy id */ - if (!priv->phy_id) { + if (!priv->phydev && !priv->phy_id) { struct device *phy; phy = bus_find_device(&mdio_bus_type, NULL, NULL, @@ -1606,7 +1624,7 @@ static int emac_dev_open(struct net_device *ndev) priv->phy_id = dev_name(phy); } - if (priv->phy_id && *priv->phy_id) { + if (!priv->phydev && priv->phy_id && *priv->phy_id) { priv->phydev = phy_connect(ndev, priv->phy_id, &emac_adjust_link, PHY_INTERFACE_MODE_MII); @@ -1627,7 +1645,9 @@ static int emac_dev_open(struct net_device *ndev) "(mii_bus:phy_addr=%s, id=%x)\n", priv->phydev->drv->name, dev_name(&priv->phydev->dev), priv->phydev->phy_id); - } else { + } + + if (!priv->phydev) { /* No PHY , fix the link, speed and duplex settings */ dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); priv->link = 1; @@ -1724,6 +1744,15 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) struct emac_priv *priv = netdev_priv(ndev); u32 mac_control; u32 stats_clear_mask; + int err; + + err = pm_runtime_get_sync(&priv->pdev->dev); + if (err < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", + __func__, err); + return &ndev->stats; + } /* update emac hardware stats and reset the registers*/ @@ -1766,6 +1795,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN); emac_write(EMAC_TXUNDERRUN, stats_clear_mask); + pm_runtime_put(&priv->pdev->dev); + return &ndev->stats; } @@ -1859,7 +1890,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) static int davinci_emac_probe(struct platform_device *pdev) { int rc = 0; - struct resource *res; + struct resource *res, *res_ctrl; struct net_device *ndev; struct emac_priv *priv; unsigned long hw_ram_addr; @@ -1876,6 +1907,7 @@ static int davinci_emac_probe(struct platform_device *pdev) return -EBUSY; } emac_bus_frequency = clk_get_rate(emac_clk); + devm_clk_put(&pdev->dev, emac_clk); /* TODO: Probe PHY here if possible */ @@ -1917,11 +1949,20 @@ static int davinci_emac_probe(struct platform_device *pdev) rc = PTR_ERR(priv->remap_addr); goto no_pdata; } + + res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res_ctrl) { + priv->ctrl_base = + devm_ioremap_resource(&pdev->dev, res_ctrl); + if (IS_ERR(priv->ctrl_base)) + goto no_pdata; + } else { + priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; + } + priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; ndev->base_addr = (unsigned long)priv->remap_addr; - priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; - hw_ram_addr = pdata->hw_ram_addr; if (!hw_ram_addr) hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; @@ -1980,12 +2021,22 @@ static int davinci_emac_probe(struct platform_device *pdev) ndev->ethtool_ops = 
&ethtool_ops; netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); + pm_runtime_enable(&pdev->dev); + rc = pm_runtime_get_sync(&pdev->dev); + if (rc < 0) { + pm_runtime_put_noidle(&pdev->dev); + dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", + __func__, rc); + goto no_cpdma_chan; + } + /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); rc = register_netdev(ndev); if (rc) { dev_err(&pdev->dev, "error in register_netdev\n"); rc = -ENODEV; + pm_runtime_put(&pdev->dev); goto no_cpdma_chan; } @@ -1995,9 +2046,7 @@ static int davinci_emac_probe(struct platform_device *pdev) "(regs: %p, irq: %d)\n", (void *)priv->emac_base_phys, ndev->irq); } - - pm_runtime_enable(&pdev->dev); - pm_runtime_resume(&pdev->dev); + pm_runtime_put(&pdev->dev); return 0; @@ -2071,9 +2120,14 @@ static const struct emac_platform_data am3517_emac_data = { .hw_ram_addr = 0x01e20000, }; +static const struct emac_platform_data dm816_emac_data = { + .version = EMAC_VERSION_2, +}; + static const struct of_device_id davinci_emac_of_match[] = { {.compatible = "ti,davinci-dm6467-emac", }, {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, + {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, }, {}, }; MODULE_DEVICE_TABLE(of, davinci_emac_of_match); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 57ec23e8ccfa..bf405f134d3a 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -833,9 +833,6 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) index &= ~3; } - generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); - - data |= __le32_to_cpu(tmp) & ~mask; tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); @@ -874,9 +871,6 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) index &= ~3; } - generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); - - data |= __le32_to_cpu(tmp) & ~mask; tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); @@ -926,12 +920,6 @@ static void sram_write(struct r8152 *tp, u16 addr, u16 data) ocp_reg_write(tp, OCP_SRAM_DATA, data); } -static u16 sram_read(struct r8152 *tp, u16 addr) -{ - ocp_reg_write(tp, OCP_SRAM_ADDR, addr); - return ocp_reg_read(tp, OCP_SRAM_DATA); -} - static int read_mii_word(struct net_device *netdev, int phy_id, int reg) { struct r8152 *tp = netdev_priv(netdev); @@ -2518,24 +2506,18 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EN_10M_PLLOFF; ocp_reg_write(tp, OCP_POWER_CFG, data); - data = sram_read(tp, SRAM_IMPEDANCE); - data &= ~RX_DRIVING_MASK; - sram_write(tp, SRAM_IMPEDANCE, data); + sram_write(tp, SRAM_IMPEDANCE, 0x0b13); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); - data = sram_read(tp, SRAM_LPF_CFG); - data |= LPF_AUTO_TUNE; - sram_write(tp, SRAM_LPF_CFG, data); + /* Enable LPF corner auto tune */ + sram_write(tp, SRAM_LPF_CFG, 0xf70f); - data = sram_read(tp, SRAM_10M_AMP1); - data |= GDAC_IB_UPALL; - sram_write(tp, SRAM_10M_AMP1, data); - data = sram_read(tp, SRAM_10M_AMP2); - data |= AMP_DN; - sram_write(tp, SRAM_10M_AMP2, data); + /* Adjust 10M Amplitude */ + sram_write(tp, SRAM_10M_AMP1, 0x00af); + sram_write(tp, SRAM_10M_AMP2, 0x0208); set_bit(PHY_RESET, &tp->flags); } diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index ea63fbd228ed..352b4f28f82c 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@
-114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, ret = of_overlay_apply_one(ov, tchild, child); if (ret) return ret; - - /* The properties are already copied, now do the child nodes */ - for_each_child_of_node(child, grandchild) { - ret = of_overlay_apply_single_device_node(ov, tchild, grandchild); - if (ret) { - pr_err("%s: Failed to apply single node @%s/%s\n", - __func__, tchild->full_name, - grandchild->name); - return ret; - } - } } return ret; diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 5b33c6a21807..b0d50d70a8a1 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev) size = dev->coherent_dma_mask; } else { offset = PFN_DOWN(paddr - dma_addr); - dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset); + dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); } dev->dma_pfn_offset = offset; @@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb, if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) return NOTIFY_OK; /* not for us */ + /* already populated? (driver using of_populate manually) */ + if (of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; + /* pdev_parent may be NULL when no bus platform device */ pdev_parent = of_find_device_by_node(rd->dn->parent); pdev = of_platform_device_create(rd->dn, NULL, @@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb, break; case OF_RECONFIG_CHANGE_REMOVE: + + /* already depopulated? */ + if (!of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; + /* find our device by node */ pdev = of_find_device_by_node(rd->dn); if (pdev == NULL) diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi index 75976da22b2e..a2b687d5f324 100644 --- a/drivers/of/unittest-data/tests-overlay.dtsi +++ b/drivers/of/unittest-data/tests-overlay.dtsi @@ -176,5 +176,60 @@ }; }; + overlay10 { + fragment@0 { + target-path = "/testcase-data/overlay-node/test-bus"; + __overlay__ { + + /* suppress DTC warning */ + #address-cells = <1>; + #size-cells = <0>; + + test-selftest10 { + compatible = "selftest"; + status = "okay"; + reg = <10>; + + #address-cells = <1>; + #size-cells = <0>; + + test-selftest101 { + compatible = "selftest"; + status = "okay"; + reg = <1>; + }; + + }; + }; + }; + }; + + overlay11 { + fragment@0 { + target-path = "/testcase-data/overlay-node/test-bus"; + __overlay__ { + + /* suppress DTC warning */ + #address-cells = <1>; + #size-cells = <0>; + + test-selftest11 { + compatible = "selftest"; + status = "okay"; + reg = <11>; + + #address-cells = <1>; + #size-cells = <0>; + + test-selftest111 { + compatible = "selftest"; + status = "okay"; + reg = <1>; + }; + + }; + }; + }; + }; }; }; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 844838e11ef1..41a4a138f53b 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -978,6 +978,9 @@ static int selftest_probe(struct platform_device *pdev) } dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + + of_platform_populate(np, NULL, NULL, &pdev->dev); + return 0; } @@ -1385,6 +1388,39 @@ static void of_selftest_overlay_8(void) selftest(1, "overlay test %d passed\n", 8); } +/* test insertion of a bus with parent devices */ +static void of_selftest_overlay_10(void) +{ + int ret; + char *child_path; + + /* device should disable */ + ret = of_selftest_apply_overlay_check(10, 10, 0, 1); + if (selftest(ret == 0, "overlay test %d failed; 
overlay application\n", 10)) + return; + + child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101", + selftest_path(10)); + if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10)) + return; + + ret = of_path_platform_device_exists(child_path); + kfree(child_path); + if (selftest(ret, "overlay test %d failed; no child device\n", 10)) + return; +} + +/* test insertion of a bus with parent devices (and revert) */ +static void of_selftest_overlay_11(void) +{ + int ret; + + /* device should disable */ + ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1); + if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 11)) + return; +} + static void __init of_selftest_overlay(void) { struct device_node *bus_np = NULL; @@ -1433,6 +1469,9 @@ static void __init of_selftest_overlay(void) of_selftest_overlay_6(); of_selftest_overlay_8(); + of_selftest_overlay_10(); + of_selftest_overlay_11(); + out: of_node_put(bus_np); } diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 37e71ff6408d..dceb9ddfd99a 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -694,9 +694,8 @@ lba_fixup_bus(struct pci_bus *bus) int i; /* PCI-PCI Bridge */ pci_read_bridge_bases(bus); - for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { - pci_claim_resource(bus->self, i); - } + for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) + pci_claim_bridge_resource(bus->self, i); } else { /* Host-PCI Bridge */ int err; diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 73aef51a28f0..8fb16188cd82 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, } EXPORT_SYMBOL(pci_bus_alloc_resource); +/* + * The @idx resource of @dev should be a PCI-PCI bridge window. If this + * resource fits inside a window of an upstream bridge, do nothing. If it + * overlaps an upstream window but extends outside it, clip the resource so + * it fits completely inside. 
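+ * For example, a bridge window of 0x1000-0x4fff under an upstream window of + * 0x2000-0x7fff is clipped to 0x2000-0x4fff, while a window with no overlap + * at all is left untouched and the function returns false.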
+ */ +bool pci_bus_clip_resource(struct pci_dev *dev, int idx) +{ + struct pci_bus *bus = dev->bus; + struct resource *res = &dev->resource[idx]; + struct resource orig_res = *res; + struct resource *r; + int i; + + pci_bus_for_each_resource(bus, r, i) { + resource_size_t start, end; + + if (!r) + continue; + + if (resource_type(res) != resource_type(r)) + continue; + + start = max(r->start, res->start); + end = min(r->end, res->end); + + if (start > end) + continue; /* no overlap */ + + if (res->start == start && res->end == end) + return false; /* no change */ + + res->start = start; + res->end = end; + dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", + &orig_res, res); + + return true; + } + + return false; +} + void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } /** diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index cab05f31223f..e9d4fd861ba1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3271,7 +3271,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe) { struct pci_dev *pdev; - if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) + if (pci_is_root_bus(dev->bus) || dev->subordinate || + !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; list_for_each_entry(pdev, &dev->bus->devices, bus_list) @@ -3305,7 +3306,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) { struct pci_dev *pdev; - if (dev->subordinate || !dev->slot) + if (dev->subordinate || !dev->slot || + dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; list_for_each_entry(pdev, &dev->bus->devices, bus_list) @@ -3557,6 +3559,20 @@ int pci_try_reset_function(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_try_reset_function); +/* Do any devices on or below this bus prevent a bus reset? */ +static bool pci_bus_resetable(struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || + (dev->subordinate && !pci_bus_resetable(dev->subordinate))) + return false; + } + + return true; +} + /* Lock devices from the top of the tree down */ static void pci_bus_lock(struct pci_bus *bus) { @@ -3607,6 +3623,22 @@ unlock: return 0; } +/* Do any devices on or below this slot prevent a bus reset? 
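+ * Any device in the sub-tree with PCI_DEV_FLAGS_NO_BUS_RESET set (for + * example the Atheros AR93xx parts quirked below) makes the slot + * non-resetable.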
*/ +static bool pci_slot_resetable(struct pci_slot *slot) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &slot->bus->devices, bus_list) { + if (!dev->slot || dev->slot != slot) + continue; + if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || + (dev->subordinate && !pci_bus_resetable(dev->subordinate))) + return false; + } + + return true; +} + /* Lock devices from the top of the tree down */ static void pci_slot_lock(struct pci_slot *slot) { @@ -3728,7 +3760,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe) { int rc; - if (!slot) + if (!slot || !pci_slot_resetable(slot)) return -ENOTTY; if (!probe) @@ -3820,7 +3852,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot); static int pci_bus_reset(struct pci_bus *bus, int probe) { - if (!bus->self) + if (!bus->self || !pci_bus_resetable(bus)) return -ENOTTY; if (probe) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 8aff29a804ff..d54632a1db43 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *realloc_head, struct list_head *fail_head); +bool pci_bus_clip_resource(struct pci_dev *dev, int idx); /** * pci_ari_enabled - query ARI forwarding status diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ed6f89b6efe5..e52356aa09b8 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3028,6 +3028,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, quirk_broken_intx_masking); +static void quirk_no_bus_reset(struct pci_dev *dev) +{ + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; +} + +/* + * Atheros AR93xx chips do not behave after a bus reset. The device will + * throw a Link Down error on AER-capable systems and regardless of AER, + * config space of the device is never accessible again and typically + * causes the system to hang or reset when access is attempted. + * http://www.spinics.net/lists/linux-pci/msg34797.html + */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); + #ifdef CONFIG_ACPI /* * Apple: Shutdown Cactus Ridge Thunderbolt controller. diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 0482235eee92..e3e17f3c0f0f 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus); config space writes, so it's quite possible that an I/O window of the bridge will have some undesirable address (e.g. 0) after the first write. Ditto 64-bit prefetchable MMIO. */ -static void pci_setup_bridge_io(struct pci_bus *bus) +static void pci_setup_bridge_io(struct pci_dev *bridge) { - struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; unsigned long io_mask; @@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus) io_mask = PCI_IO_1K_RANGE_MASK; /* Set up the top and bottom of the PCI I/O segment for this bus. 
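* The window lives in the PCI_IO_BASE/PCI_IO_LIMIT registers; io_mask reflects * the window granularity, 4K for a standard bridge or 1K * (PCI_IO_1K_RANGE_MASK) when the bridge supports 1K windows.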
*/ - res = bus->resource[0]; + res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_IO) { pci_read_config_word(bridge, PCI_IO_BASE, &l); @@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus) pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); } -static void pci_setup_bridge_mmio(struct pci_bus *bus) +static void pci_setup_bridge_mmio(struct pci_dev *bridge) { - struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; u32 l; /* Set up the top and bottom of the PCI Memory segment for this bus. */ - res = bus->resource[1]; + res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_MEM) { l = (region.start >> 16) & 0xfff0; @@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus) pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); } -static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) +static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) { - struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; u32 l, bu, lu; @@ -602,7 +599,7 @@ /* Set up PREF base/limit. */ bu = lu = 0; - res = bus->resource[2]; + res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_PREFETCH) { l = (region.start >> 16) & 0xfff0; @@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) &bus->busn_res); if (type & IORESOURCE_IO) - pci_setup_bridge_io(bus); + pci_setup_bridge_io(bridge); if (type & IORESOURCE_MEM) - pci_setup_bridge_mmio(bus); + pci_setup_bridge_mmio(bridge); if (type & IORESOURCE_PREFETCH) - pci_setup_bridge_mmio_pref(bus); + pci_setup_bridge_mmio_pref(bridge); pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); } @@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus) __pci_setup_bridge(bus, type); } + +int pci_claim_bridge_resource(struct pci_dev *bridge, int i) +{ + if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END) + return 0; + + if (pci_claim_resource(bridge, i) == 0) + return 0; /* claimed the window */ + + if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI) + return 0; + + if (!pci_bus_clip_resource(bridge, i)) + return -EINVAL; /* clipping didn't change anything */ + + switch (i - PCI_BRIDGE_RESOURCES) { + case 0: + pci_setup_bridge_io(bridge); + break; + case 1: + pci_setup_bridge_mmio(bridge); + break; + case 2: + pci_setup_bridge_mmio_pref(bridge); + break; + default: + return -EINVAL; + } + + if (pci_claim_resource(bridge, i) == 0) + return 0; /* claimed a smaller window */ + + return -EINVAL; +} + /* Check whether the bridge supports optional I/O and prefetchable memory ranges. If not, the respective base/limit registers must be read-only and read as 0. */ diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index e4f65510c87e..89dca77ca038 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev) if (pctldev == NULL) return; - mutex_lock(&pinctrldev_list_mutex); mutex_lock(&pctldev->mutex); - pinctrl_remove_device_debugfs(pctldev); + mutex_unlock(&pctldev->mutex); if (!IS_ERR(pctldev->p)) pinctrl_put(pctldev->p); + mutex_lock(&pinctrldev_list_mutex); + mutex_lock(&pctldev->mutex); /* TODO: check that no pinmuxes are still active?
*/ list_del(&pctldev->node); /* Destroy descriptor tree */ diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 3c22dbebc80f..43eacc924b7e 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -1398,10 +1398,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip = irq_get_chip(irq); struct rockchip_pin_bank *bank = irq_get_handler_data(irq); - u32 polarity = 0, data = 0; u32 pend; - bool edge_changed = false; - unsigned long flags; dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name); @@ -1409,12 +1406,6 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc) pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS); - if (bank->toggle_edge_mode) { - polarity = readl_relaxed(bank->reg_base + - GPIO_INT_POLARITY); - data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT); - } - while (pend) { unsigned int virq; @@ -1434,27 +1425,31 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc) * needs manual intervention. */ if (bank->toggle_edge_mode & BIT(irq)) { - if (data & BIT(irq)) - polarity &= ~BIT(irq); - else - polarity |= BIT(irq); + u32 data, data_old, polarity; + unsigned long flags; - edge_changed = true; - } + data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT); + do { + spin_lock_irqsave(&bank->slock, flags); - generic_handle_irq(virq); - } + polarity = readl_relaxed(bank->reg_base + + GPIO_INT_POLARITY); + if (data & BIT(irq)) + polarity &= ~BIT(irq); + else + polarity |= BIT(irq); + writel(polarity, + bank->reg_base + GPIO_INT_POLARITY); - if (bank->toggle_edge_mode && edge_changed) { - /* Interrupt params should only be set with ints disabled */ - spin_lock_irqsave(&bank->slock, flags); + spin_unlock_irqrestore(&bank->slock, flags); - data = readl_relaxed(bank->reg_base + GPIO_INTEN); - writel_relaxed(0, bank->reg_base + GPIO_INTEN); - writel(polarity, bank->reg_base + GPIO_INT_POLARITY); - writel(data, bank->reg_base + GPIO_INTEN); + data_old = data; + data = readl_relaxed(bank->reg_base + + GPIO_EXT_PORT); + } while ((data & BIT(irq)) != (data_old & BIT(irq))); + } - spin_unlock_irqrestore(&bank->slock, flags); + generic_handle_irq(virq); } chained_irq_exit(chip, desc); diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index c5cef59f5965..779950c62e53 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev) /* load the gpio chip */ xway_chip.dev = &pdev->dev; - of_gpiochip_add(&xway_chip); ret = gpiochip_add(&xway_chip); if (ret) { - of_gpiochip_remove(&xway_chip); dev_err(&pdev->dev, "Failed to register gpio chip\n"); return ret; } diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index e730935fa457..ed7017df065d 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -865,10 +865,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) { - int i = 0; + int i; const struct msm_function *func = pctrl->soc->functions; - for (; i <= pctrl->soc->nfunctions; i++) + for (i = 0; i < pctrl->soc->nfunctions; i++) if (!strcmp(func[i].name, "ps_hold")) { pctrl->restart_nb.notifier_call = msm_ps_hold_restart; pctrl->restart_nb.priority = 128; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 
9411eae39a4e..3d21efe11d7b 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -2,11 +2,9 @@ * Driver for Dell laptop extras * * Copyright (c) Red Hat <mjg@redhat.com> - * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com> - * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com> * - * Based on documentation in the libsmbios package: - * Copyright (C) 2005-2014 Dell Inc. + * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell + * Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -34,13 +32,6 @@ #include "../../firmware/dcdbas.h" #define BRIGHTNESS_TOKEN 0x7d -#define KBD_LED_OFF_TOKEN 0x01E1 -#define KBD_LED_ON_TOKEN 0x01E2 -#define KBD_LED_AUTO_TOKEN 0x01E3 -#define KBD_LED_AUTO_25_TOKEN 0x02EA -#define KBD_LED_AUTO_50_TOKEN 0x02EB -#define KBD_LED_AUTO_75_TOKEN 0x02EC -#define KBD_LED_AUTO_100_TOKEN 0x02F6 /* This structure will be modified by the firmware when we enter * system management mode, hence the volatiles */ @@ -71,13 +62,6 @@ struct calling_interface_structure { struct quirk_entry { u8 touchpad_led; - - int needs_kbd_timeouts; - /* - * Ordered list of timeouts expressed in seconds. - * The list must end with -1 - */ - int kbd_timeouts[]; }; static struct quirk_entry *quirks; @@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi) return 1; } -/* - * These values come from Windows utility provided by Dell. If any other value - * is used then BIOS silently set timeout to 0 without any error message. - */ -static struct quirk_entry quirk_dell_xps13_9333 = { - .needs_kbd_timeouts = 1, - .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 }, -}; - static int da_command_address; static int da_command_code; static int da_num_tokens; @@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = { }, .driver_data = &quirk_dell_vostro_v130, }, - { - .callback = dmi_matched, - .ident = "Dell XPS13 9333", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"), - }, - .driver_data = &quirk_dell_xps13_9333, - }, { } }; @@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy) } } -static int find_token_id(int tokenid) +static int find_token_location(int tokenid) { int i; - for (i = 0; i < da_num_tokens; i++) { if (da_tokens[i].tokenID == tokenid) - return i; + return da_tokens[i].location; } return -1; } -static int find_token_location(int tokenid) -{ - int id; - - id = find_token_id(tokenid); - if (id == -1) - return -1; - - return da_tokens[id].location; -} - static struct calling_interface_buffer * dell_send_request(struct calling_interface_buffer *buffer, int class, int select) @@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class, return buffer; } -static inline int dell_smi_error(int value) -{ - switch (value) { - case 0: /* Completed successfully */ - return 0; - case -1: /* Completed with error */ - return -EIO; - case -2: /* Function not supported */ - return -ENXIO; - default: /* Unknown error */ - return -EINVAL; - } -} - /* Derived from information in DellWirelessCtl.cpp: Class 17, select 11 is radio control. It returns an array of 32-bit values. 
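Every SMBIOS call in this driver goes through the same shared-buffer protocol seen throughout the file: take the buffer, fill input[], issue dell_send_request() with a class/select pair, then read output[] back, with output[0] holding the standard return code. A minimal sketch of a radio-control query along the lines of the comment above (the exact cbArg/cbRES layout is illustrative, not taken from this patch):

	int ret, status;

	get_buffer();
	buffer->input[0] = 0;			/* cbArg1: query */
	dell_send_request(buffer, 17, 11);	/* class 17, select 11 */
	ret = buffer->output[0];		/* cbRES1: return code */
	status = buffer->output[1];		/* cbRES2: status bits */
	release_buffer();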
@@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd) else dell_send_request(buffer, 1, 1); - out: +out: release_buffer(); return ret; } @@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd) ret = buffer->output[1]; - out: +out: release_buffer(); return ret; } @@ -849,984 +789,6 @@ static void touchpad_led_exit(void) led_classdev_unregister(&touchpad_led); } -/* - * Derived from information in smbios-keyboard-ctl: - * - * cbClass 4 - * cbSelect 11 - * Keyboard illumination - * cbArg1 determines the function to be performed - * - * cbArg1 0x0 = Get Feature Information - * cbRES1 Standard return codes (0, -1, -2) - * cbRES2, word0 Bitmap of user-selectable modes - * bit 0 Always off (All systems) - * bit 1 Always on (Travis ATG, Siberia) - * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG) - * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off - * bit 4 Auto: Input-activity-based On; input-activity based Off - * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off - * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off - * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off - * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off - * bits 9-15 Reserved for future use - * cbRES2, byte2 Reserved for future use - * cbRES2, byte3 Keyboard illumination type - * 0 Reserved - * 1 Tasklight - * 2 Backlight - * 3-255 Reserved for future use - * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap. - * bit 0 Any keystroke - * bit 1 Touchpad activity - * bit 2 Pointing stick - * bit 3 Any mouse - * bits 4-7 Reserved for future use - * cbRES3, byte1 Supported timeout unit bitmap - * bit 0 Seconds - * bit 1 Minutes - * bit 2 Hours - * bit 3 Days - * bits 4-7 Reserved for future use - * cbRES3, byte2 Number of keyboard light brightness levels - * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported). - * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported). - * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported). - * cbRES4, byte3 Maximum acceptable days value (0 if days not supported) - * - * cbArg1 0x1 = Get Current State - * cbRES1 Standard return codes (0, -1, -2) - * cbRES2, word0 Bitmap of current mode state - * bit 0 Always off (All systems) - * bit 1 Always on (Travis ATG, Siberia) - * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG) - * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off - * bit 4 Auto: Input-activity-based On; input-activity based Off - * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off - * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off - * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off - * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off - * bits 9-15 Reserved for future use - * Note: Only One bit can be set - * cbRES2, byte2 Currently active auto keyboard illumination triggers. 
- * bit 0 Any keystroke - * bit 1 Touchpad activity - * bit 2 Pointing stick - * bit 3 Any mouse - * bits 4-7 Reserved for future use - * cbRES2, byte3 Current Timeout - * bits 7:6 Timeout units indicator: - * 00b Seconds - * 01b Minutes - * 10b Hours - * 11b Days - * bits 5:0 Timeout value (0-63) in sec/min/hr/day - * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte - * are set upon return from the [Get feature information] call. - * cbRES3, byte0 Current setting of ALS value that turns the light on or off. - * cbRES3, byte1 Current ALS reading - * cbRES3, byte2 Current keyboard light level. - * - * cbArg1 0x2 = Set New State - * cbRES1 Standard return codes (0, -1, -2) - * cbArg2, word0 Bitmap of current mode state - * bit 0 Always off (All systems) - * bit 1 Always on (Travis ATG, Siberia) - * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG) - * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off - * bit 4 Auto: Input-activity-based On; input-activity based Off - * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off - * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off - * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off - * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off - * bits 9-15 Reserved for future use - * Note: Only One bit can be set - * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow - * keyboard to turn off automatically. - * bit 0 Any keystroke - * bit 1 Touchpad activity - * bit 2 Pointing stick - * bit 3 Any mouse - * bits 4-7 Reserved for future use - * cbArg2, byte3 Desired Timeout - * bits 7:6 Timeout units indicator: - * 00b Seconds - * 01b Minutes - * 10b Hours - * 11b Days - * bits 5:0 Timeout value (0-63) in sec/min/hr/day - * cbArg3, byte0 Desired setting of ALS value that turns the light on or off. - * cbArg3, byte2 Desired keyboard light level. 
- */ - - -enum kbd_timeout_unit { - KBD_TIMEOUT_SECONDS = 0, - KBD_TIMEOUT_MINUTES, - KBD_TIMEOUT_HOURS, - KBD_TIMEOUT_DAYS, -}; - -enum kbd_mode_bit { - KBD_MODE_BIT_OFF = 0, - KBD_MODE_BIT_ON, - KBD_MODE_BIT_ALS, - KBD_MODE_BIT_TRIGGER_ALS, - KBD_MODE_BIT_TRIGGER, - KBD_MODE_BIT_TRIGGER_25, - KBD_MODE_BIT_TRIGGER_50, - KBD_MODE_BIT_TRIGGER_75, - KBD_MODE_BIT_TRIGGER_100, -}; - -#define kbd_is_als_mode_bit(bit) \ - ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS) -#define kbd_is_trigger_mode_bit(bit) \ - ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100) -#define kbd_is_level_mode_bit(bit) \ - ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100) - -struct kbd_info { - u16 modes; - u8 type; - u8 triggers; - u8 levels; - u8 seconds; - u8 minutes; - u8 hours; - u8 days; -}; - -struct kbd_state { - u8 mode_bit; - u8 triggers; - u8 timeout_value; - u8 timeout_unit; - u8 als_setting; - u8 als_value; - u8 level; -}; - -static const int kbd_tokens[] = { - KBD_LED_OFF_TOKEN, - KBD_LED_AUTO_25_TOKEN, - KBD_LED_AUTO_50_TOKEN, - KBD_LED_AUTO_75_TOKEN, - KBD_LED_AUTO_100_TOKEN, - KBD_LED_ON_TOKEN, -}; - -static u16 kbd_token_bits; - -static struct kbd_info kbd_info; -static bool kbd_als_supported; -static bool kbd_triggers_supported; - -static u8 kbd_mode_levels[16]; -static int kbd_mode_levels_count; - -static u8 kbd_previous_level; -static u8 kbd_previous_mode_bit; - -static bool kbd_led_present; - -/* - * NOTE: there are three ways to set the keyboard backlight level. - * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value). - * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels). - * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens) - * - * There are laptops which support only one of these methods. If we want to - * support as many machines as possible we need to implement all three methods. - * The first two methods use the kbd_state structure. The third uses SMBIOS - * tokens. If kbd_info.levels == 0, the machine does not support setting the - * keyboard backlight level via kbd_state.level. 
- */ - -static int kbd_get_info(struct kbd_info *info) -{ - u8 units; - int ret; - - get_buffer(); - - buffer->input[0] = 0x0; - dell_send_request(buffer, 4, 11); - ret = buffer->output[0]; - - if (ret) { - ret = dell_smi_error(ret); - goto out; - } - - info->modes = buffer->output[1] & 0xFFFF; - info->type = (buffer->output[1] >> 24) & 0xFF; - info->triggers = buffer->output[2] & 0xFF; - units = (buffer->output[2] >> 8) & 0xFF; - info->levels = (buffer->output[2] >> 16) & 0xFF; - - if (units & BIT(0)) - info->seconds = (buffer->output[3] >> 0) & 0xFF; - if (units & BIT(1)) - info->minutes = (buffer->output[3] >> 8) & 0xFF; - if (units & BIT(2)) - info->hours = (buffer->output[3] >> 16) & 0xFF; - if (units & BIT(3)) - info->days = (buffer->output[3] >> 24) & 0xFF; - - out: - release_buffer(); - return ret; -} - -static unsigned int kbd_get_max_level(void) -{ - if (kbd_info.levels != 0) - return kbd_info.levels; - if (kbd_mode_levels_count > 0) - return kbd_mode_levels_count - 1; - return 0; -} - -static int kbd_get_level(struct kbd_state *state) -{ - int i; - - if (kbd_info.levels != 0) - return state->level; - - if (kbd_mode_levels_count > 0) { - for (i = 0; i < kbd_mode_levels_count; ++i) - if (kbd_mode_levels[i] == state->mode_bit) - return i; - return 0; - } - - return -EINVAL; -} - -static int kbd_set_level(struct kbd_state *state, u8 level) -{ - if (kbd_info.levels != 0) { - if (level != 0) - kbd_previous_level = level; - if (state->level == level) - return 0; - state->level = level; - if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF) - state->mode_bit = kbd_previous_mode_bit; - else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) { - kbd_previous_mode_bit = state->mode_bit; - state->mode_bit = KBD_MODE_BIT_OFF; - } - return 0; - } - - if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) { - if (level != 0) - kbd_previous_level = level; - state->mode_bit = kbd_mode_levels[level]; - return 0; - } - - return -EINVAL; -} - -static int kbd_get_state(struct kbd_state *state) -{ - int ret; - - get_buffer(); - - buffer->input[0] = 0x1; - dell_send_request(buffer, 4, 11); - ret = buffer->output[0]; - - if (ret) { - ret = dell_smi_error(ret); - goto out; - } - - state->mode_bit = ffs(buffer->output[1] & 0xFFFF); - if (state->mode_bit != 0) - state->mode_bit--; - - state->triggers = (buffer->output[1] >> 16) & 0xFF; - state->timeout_value = (buffer->output[1] >> 24) & 0x3F; - state->timeout_unit = (buffer->output[1] >> 30) & 0x3; - state->als_setting = buffer->output[2] & 0xFF; - state->als_value = (buffer->output[2] >> 8) & 0xFF; - state->level = (buffer->output[2] >> 16) & 0xFF; - - out: - release_buffer(); - return ret; -} - -static int kbd_set_state(struct kbd_state *state) -{ - int ret; - - get_buffer(); - buffer->input[0] = 0x2; - buffer->input[1] = BIT(state->mode_bit) & 0xFFFF; - buffer->input[1] |= (state->triggers & 0xFF) << 16; - buffer->input[1] |= (state->timeout_value & 0x3F) << 24; - buffer->input[1] |= (state->timeout_unit & 0x3) << 30; - buffer->input[2] = state->als_setting & 0xFF; - buffer->input[2] |= (state->level & 0xFF) << 16; - dell_send_request(buffer, 4, 11); - ret = buffer->output[0]; - release_buffer(); - - return dell_smi_error(ret); -} - -static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old) -{ - int ret; - - ret = kbd_set_state(state); - if (ret == 0) - return 0; - - /* - * When setting the new state fails,try to restore the previous one. 
- * This is needed on some machines where BIOS sets a default state when - * setting a new state fails. This default state could be all off. - */ - - if (kbd_set_state(old)) - pr_err("Restoring the previous keyboard state failed\n"); - - return ret; -} - -static int kbd_set_token_bit(u8 bit) -{ - int id; - int ret; - - if (bit >= ARRAY_SIZE(kbd_tokens)) - return -EINVAL; - - id = find_token_id(kbd_tokens[bit]); - if (id == -1) - return -EINVAL; - - get_buffer(); - buffer->input[0] = da_tokens[id].location; - buffer->input[1] = da_tokens[id].value; - dell_send_request(buffer, 1, 0); - ret = buffer->output[0]; - release_buffer(); - - return dell_smi_error(ret); -} - -static int kbd_get_token_bit(u8 bit) -{ - int id; - int ret; - int val; - - if (bit >= ARRAY_SIZE(kbd_tokens)) - return -EINVAL; - - id = find_token_id(kbd_tokens[bit]); - if (id == -1) - return -EINVAL; - - get_buffer(); - buffer->input[0] = da_tokens[id].location; - dell_send_request(buffer, 0, 0); - ret = buffer->output[0]; - val = buffer->output[1]; - release_buffer(); - - if (ret) - return dell_smi_error(ret); - - return (val == da_tokens[id].value); -} - -static int kbd_get_first_active_token_bit(void) -{ - int i; - int ret; - - for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) { - ret = kbd_get_token_bit(i); - if (ret == 1) - return i; - } - - return ret; -} - -static int kbd_get_valid_token_counts(void) -{ - return hweight16(kbd_token_bits); -} - -static inline int kbd_init_info(void) -{ - struct kbd_state state; - int ret; - int i; - - ret = kbd_get_info(&kbd_info); - if (ret) - return ret; - - kbd_get_state(&state); - - /* NOTE: timeout value is stored in 6 bits so max value is 63 */ - if (kbd_info.seconds > 63) - kbd_info.seconds = 63; - if (kbd_info.minutes > 63) - kbd_info.minutes = 63; - if (kbd_info.hours > 63) - kbd_info.hours = 63; - if (kbd_info.days > 63) - kbd_info.days = 63; - - /* NOTE: On tested machines ON mode did not work and caused - * problems (turned backlight off) so do not use it - */ - kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON); - - kbd_previous_level = kbd_get_level(&state); - kbd_previous_mode_bit = state.mode_bit; - - if (kbd_previous_level == 0 && kbd_get_max_level() != 0) - kbd_previous_level = 1; - - if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) { - kbd_previous_mode_bit = - ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF)); - if (kbd_previous_mode_bit != 0) - kbd_previous_mode_bit--; - } - - if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) | - BIT(KBD_MODE_BIT_TRIGGER_ALS))) - kbd_als_supported = true; - - if (kbd_info.modes & ( - BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) | - BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) | - BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100) - )) - kbd_triggers_supported = true; - - /* kbd_mode_levels[0] is reserved, see below */ - for (i = 0; i < 16; ++i) - if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes)) - kbd_mode_levels[1 + kbd_mode_levels_count++] = i; - - /* - * Find the first supported mode and assign to kbd_mode_levels[0]. - * This should be 0 (off), but we cannot depend on the BIOS to - * support 0.
- */ - if (kbd_mode_levels_count > 0) { - for (i = 0; i < 16; ++i) { - if (BIT(i) & kbd_info.modes) { - kbd_mode_levels[0] = i; - break; - } - } - kbd_mode_levels_count++; - } - - return 0; - -} - -static inline void kbd_init_tokens(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) - if (find_token_id(kbd_tokens[i]) != -1) - kbd_token_bits |= BIT(i); -} - -static void kbd_init(void) -{ - int ret; - - ret = kbd_init_info(); - kbd_init_tokens(); - - if (kbd_token_bits != 0 || ret == 0) - kbd_led_present = true; -} - -static ssize_t kbd_led_timeout_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct kbd_state new_state; - struct kbd_state state; - bool convert; - int value; - int ret; - char ch; - u8 unit; - int i; - - ret = sscanf(buf, "%d %c", &value, &ch); - if (ret < 1) - return -EINVAL; - else if (ret == 1) - ch = 's'; - - if (value < 0) - return -EINVAL; - - convert = false; - - switch (ch) { - case 's': - if (value > kbd_info.seconds) - convert = true; - unit = KBD_TIMEOUT_SECONDS; - break; - case 'm': - if (value > kbd_info.minutes) - convert = true; - unit = KBD_TIMEOUT_MINUTES; - break; - case 'h': - if (value > kbd_info.hours) - convert = true; - unit = KBD_TIMEOUT_HOURS; - break; - case 'd': - if (value > kbd_info.days) - convert = true; - unit = KBD_TIMEOUT_DAYS; - break; - default: - return -EINVAL; - } - - if (quirks && quirks->needs_kbd_timeouts) - convert = true; - - if (convert) { - /* Convert value from current units to seconds */ - switch (unit) { - case KBD_TIMEOUT_DAYS: - value *= 24; - case KBD_TIMEOUT_HOURS: - value *= 60; - case KBD_TIMEOUT_MINUTES: - value *= 60; - unit = KBD_TIMEOUT_SECONDS; - } - - if (quirks && quirks->needs_kbd_timeouts) { - for (i = 0; quirks->kbd_timeouts[i] != -1; i++) { - if (value <= quirks->kbd_timeouts[i]) { - value = quirks->kbd_timeouts[i]; - break; - } - } - } - - if (value <= kbd_info.seconds && kbd_info.seconds) { - unit = KBD_TIMEOUT_SECONDS; - } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) { - value /= 60; - unit = KBD_TIMEOUT_MINUTES; - } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) { - value /= (60 * 60); - unit = KBD_TIMEOUT_HOURS; - } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) { - value /= (60 * 60 * 24); - unit = KBD_TIMEOUT_DAYS; - } else { - return -EINVAL; - } - } - - ret = kbd_get_state(&state); - if (ret) - return ret; - - new_state = state; - new_state.timeout_value = value; - new_state.timeout_unit = unit; - - ret = kbd_set_state_safe(&new_state, &state); - if (ret) - return ret; - - return count; -} - -static ssize_t kbd_led_timeout_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kbd_state state; - int ret; - int len; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - len = sprintf(buf, "%d", state.timeout_value); - - switch (state.timeout_unit) { - case KBD_TIMEOUT_SECONDS: - return len + sprintf(buf+len, "s\n"); - case KBD_TIMEOUT_MINUTES: - return len + sprintf(buf+len, "m\n"); - case KBD_TIMEOUT_HOURS: - return len + sprintf(buf+len, "h\n"); - case KBD_TIMEOUT_DAYS: - return len + sprintf(buf+len, "d\n"); - default: - return -EINVAL; - } - - return len; -} - -static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR, - kbd_led_timeout_show, kbd_led_timeout_store); - -static const char * const kbd_led_triggers[] = { - "keyboard", - "touchpad", - /*"trackstick"*/ NULL, /* NOTE: trackstick is just an alias for touchpad */ - "mouse", -}; - -static ssize_t
kbd_led_triggers_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct kbd_state new_state; - struct kbd_state state; - bool triggers_enabled = false; - bool als_enabled = false; - bool disable_als = false; - bool enable_als = false; - int trigger_bit = -1; - char trigger[21]; - int i, ret; - - ret = sscanf(buf, "%20s", trigger); - if (ret != 1) - return -EINVAL; - - if (trigger[0] != '+' && trigger[0] != '-') - return -EINVAL; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - if (kbd_als_supported) - als_enabled = kbd_is_als_mode_bit(state.mode_bit); - - if (kbd_triggers_supported) - triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit); - - if (kbd_als_supported) { - if (strcmp(trigger, "+als") == 0) { - if (als_enabled) - return count; - enable_als = true; - } else if (strcmp(trigger, "-als") == 0) { - if (!als_enabled) - return count; - disable_als = true; - } - } - - if (enable_als || disable_als) { - new_state = state; - if (enable_als) { - if (triggers_enabled) - new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS; - else - new_state.mode_bit = KBD_MODE_BIT_ALS; - } else { - if (triggers_enabled) { - new_state.mode_bit = KBD_MODE_BIT_TRIGGER; - kbd_set_level(&new_state, kbd_previous_level); - } else { - new_state.mode_bit = KBD_MODE_BIT_ON; - } - } - if (!(kbd_info.modes & BIT(new_state.mode_bit))) - return -EINVAL; - ret = kbd_set_state_safe(&new_state, &state); - if (ret) - return ret; - kbd_previous_mode_bit = new_state.mode_bit; - return count; - } - - if (kbd_triggers_supported) { - for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) { - if (!(kbd_info.triggers & BIT(i))) - continue; - if (!kbd_led_triggers[i]) - continue; - if (strcmp(trigger+1, kbd_led_triggers[i]) != 0) - continue; - if (trigger[0] == '+' && - triggers_enabled && (state.triggers & BIT(i))) - return count; - if (trigger[0] == '-' && - (!triggers_enabled || !(state.triggers & BIT(i)))) - return count; - trigger_bit = i; - break; - } - } - - if (trigger_bit != -1) { - new_state = state; - if (trigger[0] == '+') - new_state.triggers |= BIT(trigger_bit); - else { - new_state.triggers &= ~BIT(trigger_bit); - /* NOTE: trackstick bit (2) must be disabled when - * disabling touchpad bit (1), otherwise touchpad - * bit (1) will not be disabled */ - if (trigger_bit == 1) - new_state.triggers &= ~BIT(2); - } - if ((kbd_info.triggers & new_state.triggers) != - new_state.triggers) - return -EINVAL; - if (new_state.triggers && !triggers_enabled) { - if (als_enabled) - new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS; - else { - new_state.mode_bit = KBD_MODE_BIT_TRIGGER; - kbd_set_level(&new_state, kbd_previous_level); - } - } else if (new_state.triggers == 0) { - if (als_enabled) - new_state.mode_bit = KBD_MODE_BIT_ALS; - else - kbd_set_level(&new_state, 0); - } - if (!(kbd_info.modes & BIT(new_state.mode_bit))) - return -EINVAL; - ret = kbd_set_state_safe(&new_state, &state); - if (ret) - return ret; - if (new_state.mode_bit != KBD_MODE_BIT_OFF) - kbd_previous_mode_bit = new_state.mode_bit; - return count; - } - - return -EINVAL; -} - -static ssize_t kbd_led_triggers_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kbd_state state; - bool triggers_enabled; - int level, i, ret; - int len = 0; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - len = 0; - - if (kbd_triggers_supported) { - triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit); - level = kbd_get_level(&state); - for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); 
++i) { - if (!(kbd_info.triggers & BIT(i))) - continue; - if (!kbd_led_triggers[i]) - continue; - if ((triggers_enabled || level <= 0) && - (state.triggers & BIT(i))) - buf[len++] = '+'; - else - buf[len++] = '-'; - len += sprintf(buf+len, "%s ", kbd_led_triggers[i]); - } - } - - if (kbd_als_supported) { - if (kbd_is_als_mode_bit(state.mode_bit)) - len += sprintf(buf+len, "+als "); - else - len += sprintf(buf+len, "-als "); - } - - if (len) - buf[len - 1] = '\n'; - - return len; -} - -static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR, - kbd_led_triggers_show, kbd_led_triggers_store); - -static ssize_t kbd_led_als_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct kbd_state state; - struct kbd_state new_state; - u8 setting; - int ret; - - ret = kstrtou8(buf, 10, &setting); - if (ret) - return ret; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - new_state = state; - new_state.als_setting = setting; - - ret = kbd_set_state_safe(&new_state, &state); - if (ret) - return ret; - - return count; -} - -static ssize_t kbd_led_als_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kbd_state state; - int ret; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - return sprintf(buf, "%d\n", state.als_setting); -} - -static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR, - kbd_led_als_show, kbd_led_als_store); - -static struct attribute *kbd_led_attrs[] = { - &dev_attr_stop_timeout.attr, - &dev_attr_start_triggers.attr, - &dev_attr_als_setting.attr, - NULL, -}; -ATTRIBUTE_GROUPS(kbd_led); - -static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev) -{ - int ret; - u16 num; - struct kbd_state state; - - if (kbd_get_max_level()) { - ret = kbd_get_state(&state); - if (ret) - return 0; - ret = kbd_get_level(&state); - if (ret < 0) - return 0; - return ret; - } - - if (kbd_get_valid_token_counts()) { - ret = kbd_get_first_active_token_bit(); - if (ret < 0) - return 0; - for (num = kbd_token_bits; num != 0 && ret > 0; --ret) - num &= num - 1; /* clear the first bit set */ - if (num == 0) - return 0; - return ffs(num) - 1; - } - - pr_warn("Keyboard brightness level control not supported\n"); - return 0; -} - -static void kbd_led_level_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct kbd_state state; - struct kbd_state new_state; - u16 num; - - if (kbd_get_max_level()) { - if (kbd_get_state(&state)) - return; - new_state = state; - if (kbd_set_level(&new_state, value)) - return; - kbd_set_state_safe(&new_state, &state); - return; - } - - if (kbd_get_valid_token_counts()) { - for (num = kbd_token_bits; num != 0 && value > 0; --value) - num &= num - 1; /* clear the first bit set */ - if (num == 0) - return; - kbd_set_token_bit(ffs(num) - 1); - return; - } - - pr_warn("Keyboard brightness level control not supported\n"); -} - -static struct led_classdev kbd_led = { - .name = "dell::kbd_backlight", - .brightness_set = kbd_led_level_set, - .brightness_get = kbd_led_level_get, - .groups = kbd_led_groups, -}; - -static int __init kbd_led_init(struct device *dev) -{ - kbd_init(); - if (!kbd_led_present) - return -ENODEV; - kbd_led.max_brightness = kbd_get_max_level(); - if (!kbd_led.max_brightness) { - kbd_led.max_brightness = kbd_get_valid_token_counts(); - if (kbd_led.max_brightness) - kbd_led.max_brightness--; - } - return led_classdev_register(dev, &kbd_led); -} - -static void brightness_set_exit(struct led_classdev *led_cdev, - enum led_brightness value) -{ - /* Don't 
change backlight level on exit */ -}; - -static void kbd_led_exit(void) -{ - if (!kbd_led_present) - return; - kbd_led.brightness_set = brightness_set_exit; - led_classdev_unregister(&kbd_led); -} - static int __init dell_init(void) { int max_intensity = 0; @@ -1879,8 +841,6 @@ static int __init dell_init(void) if (quirks && quirks->touchpad_led) touchpad_led_init(&platform_device->dev); - kbd_led_init(&platform_device->dev); - dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); if (dell_laptop_dir != NULL) debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, @@ -1948,7 +908,6 @@ static void __exit dell_exit(void) debugfs_remove_recursive(dell_laptop_dir); if (quirks && quirks->touchpad_led) touchpad_led_exit(); - kbd_led_exit(); i8042_remove_filter(dell_laptop_i8042_filter); cancel_delayed_work_sync(&dell_rfkill_work); backlight_device_unregister(dell_backlight_device); @@ -1965,7 +924,5 @@ module_init(dell_init); module_exit(dell_exit); MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); -MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); -MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); MODULE_DESCRIPTION("Dell laptop driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index df4e27cd996a..9219953ee949 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd, ipr_reinit_ipr_cmnd(ipr_cmd); ipr_cmd->u.scratch = 0; ipr_cmd->sibling = NULL; + ipr_cmd->eh_comp = NULL; ipr_cmd->fast_done = fast_done; init_timer(&ipr_cmd->timer); } @@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) scsi_dma_unmap(ipr_cmd->scsi_cmd); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } @@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev) return rc; } +/** + * ipr_match_lun - Match function for specified LUN + * @ipr_cmd: ipr command struct + * @device: device to match (sdev) + * + * Returns: + * 1 if command matches sdev / 0 if command does not match sdev + **/ +static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) +{ + if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) + return 1; + return 0; +} + +/** + * ipr_wait_for_ops - Wait for matching commands to complete + * @ioa_cfg: ioa config struct + * @device: device to match (sdev) + * @match: match function to use + * + * Returns: + * SUCCESS / FAILED + **/ +static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, + int (*match)(struct ipr_cmnd *, void *)) +{ + struct ipr_cmnd *ipr_cmd; + int wait; + unsigned long flags; + struct ipr_hrr_queue *hrrq; + signed long timeout = IPR_ABORT_TASK_TIMEOUT; + DECLARE_COMPLETION_ONSTACK(comp); + + ENTER; + do { + wait = 0; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock_irqsave(hrrq->lock, flags); + list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = &comp; + wait++; + } + } + spin_unlock_irqrestore(hrrq->lock, flags); + } + + if (wait) { + timeout = wait_for_completion_timeout(&comp, timeout); + + if (!timeout) { + wait = 0; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock_irqsave(hrrq->lock, flags); + list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = NULL; + wait++; + } + } + spin_unlock_irqrestore(hrrq->lock, flags); + } + + if (wait) + dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted
commands\n"); + LEAVE; + return wait ? FAILED : SUCCESS; + } + } + } while (wait); + + LEAVE; + return SUCCESS; +} + static int ipr_eh_host_reset(struct scsi_cmnd *cmd) { struct ipr_ioa_cfg *ioa_cfg; @@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) { int rc; + struct ipr_ioa_cfg *ioa_cfg; + + ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; spin_lock_irq(cmd->device->host->host_lock); rc = __ipr_eh_dev_reset(cmd); spin_unlock_irq(cmd->device->host->host_lock); + if (rc == SUCCESS) + rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); + return rc; } @@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) { unsigned long flags; int rc; + struct ipr_ioa_cfg *ioa_cfg; ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); rc = ipr_cancel_op(scsi_cmd); spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); + if (rc == SUCCESS) + rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); LEAVE; return rc; } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index b4f3eec51bc9..ec03b42fa2b9 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1606,6 +1606,7 @@ struct ipr_cmnd { struct scsi_device *sdev; } u; + struct completion *eh_comp; struct ipr_hrr_queue *hrrq; struct ipr_ioa_cfg *ioa_cfg; }; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 7b8b51bc29b4..4aca1b0378c2 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1623,7 +1623,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) req_opcode = cmd[3]; req_sa = get_unaligned_be16(cmd + 4); alloc_len = get_unaligned_be32(cmd + 6); - if (alloc_len < 4 && alloc_len > 0xffff) { + if (alloc_len < 4 || alloc_len > 0xffff) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); return check_condition_result; } @@ -1631,7 +1631,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) a_len = 8192; else a_len = alloc_len; - arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL); + arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC); if (NULL == arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6d5c0b8cb0bb..17bb541f7cc2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1143,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd) struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; int ivecs, count; - BUG_ON(prot_sdb == NULL); + if (prot_sdb == NULL) { + /* + * This can happen if someone (e.g. multipath) + * queues a command to a device on an adapter + * that does not support DIX. 
+ */ + WARN_ON_ONCE(1); + error = BLKPREP_KILL; + goto err_exit; + } + ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) { diff --git a/drivers/staging/media/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig index 81784c6f7b88..77d8753f6ba4 100644 --- a/drivers/staging/media/tlg2300/Kconfig +++ b/drivers/staging/media/tlg2300/Kconfig @@ -1,6 +1,7 @@ config VIDEO_TLG2300 tristate "Telegent TLG2300 USB video capture support (Deprecated)" depends on VIDEO_DEV && I2C && SND && DVB_CORE + depends on MEDIA_USB_SUPPORT select VIDEO_TUNER select VIDEO_TVEEPROM depends on RC_CORE diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c index 5927c0a98a74..bcfd2a22208f 100644 --- a/drivers/watchdog/cadence_wdt.c +++ b/drivers/watchdog/cadence_wdt.c @@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = { .shutdown = cdns_wdt_shutdown, .driver = { .name = "cdns-wdt", - .owner = THIS_MODULE, .of_match_table = cdns_wdt_of_match, .pm = &cdns_wdt_pm_ops, }, diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index d6add516a7a7..5142bbabe027 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -52,6 +52,8 @@ #define IMX2_WDT_WRSR 0x04 /* Reset Status Register */ #define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */ +#define IMX2_WDT_WMCR 0x08 /* Misc Register */ + #define IMX2_WDT_MAX_TIME 128 #define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */ @@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) imx2_wdt_ping_if_active(wdog); + /* + * Disable the watchdog power down counter at boot. Otherwise the power + * down counter will pull down the #WDOG interrupt line for one clock + * cycle. + */ + regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0); + ret = watchdog_register_device(wdog); if (ret) { dev_err(&pdev->dev, "cannot register watchdog device\n"); @@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -/* Disable watchdog if it is active during suspend */ +/* Disable watchdog if it is active or non-active but still running */ static int imx2_wdt_suspend(struct device *dev) { struct watchdog_device *wdog = dev_get_drvdata(dev); struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); - imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); - imx2_wdt_ping(wdog); + /* The watchdog IP block is running */ + if (imx2_wdt_is_running(wdev)) { + imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); + imx2_wdt_ping(wdog); - /* Watchdog has been stopped but IP block is still running */ - if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev)) - del_timer_sync(&wdev->timer); + /* The watchdog is not active */ + if (!watchdog_active(wdog)) + del_timer_sync(&wdev->timer); + } clk_disable_unprepare(wdev->clk); @@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev) clk_prepare_enable(wdev->clk); if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) { - /* Resumes from deep sleep we need restart - * the watchdog again. + /* + * If the watchdog is still active and resumes + * from deep sleep state, need to restart the + * watchdog again. */ imx2_wdt_setup(wdog); imx2_wdt_set_timeout(wdog, wdog->timeout); imx2_wdt_ping(wdog); } else if (imx2_wdt_is_running(wdev)) { + /* Resuming from non-deep sleep state. 
*/ + imx2_wdt_set_timeout(wdog, wdog->timeout); imx2_wdt_ping(wdog); - mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2); + /* + * If the watchdog is not active, restart + * the software timer. + */ + if (!watchdog_active(wdog)) + mod_timer(&wdev->timer, + jiffies + wdog->timeout * HZ / 2); } return 0; diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c index ef6a298e8c45..1f4155ee3404 100644 --- a/drivers/watchdog/meson_wdt.c +++ b/drivers/watchdog/meson_wdt.c @@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = { .remove = meson_wdt_remove, .shutdown = meson_wdt_shutdown, .driver = { - .owner = THIS_MODULE, .name = DRV_NAME, .of_match_table = meson_wdt_dt_ids, },
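
Note on the removed dell-laptop timeout handling: kbd_led_timeout_store() normalizes the user-supplied timeout to seconds and then re-expresses it in the smallest unit whose 6-bit hardware field can still hold the value, so precision is kept whenever possible. The following is a minimal userspace sketch of that selection step only; the struct and function names (limits, pick_unit) are invented for illustration and are not part of the driver.

#include <stdio.h>

struct limits { int seconds, minutes, hours, days; };	/* per-unit maxima */

/* Pick the smallest unit that can represent "value" seconds. */
static int pick_unit(int value, const struct limits *lim,
		     int *out_value, char *out_unit)
{
	if (lim->seconds && value <= lim->seconds) {
		*out_value = value; *out_unit = 's';
	} else if (lim->minutes && value / 60 <= lim->minutes) {
		*out_value = value / 60; *out_unit = 'm';
	} else if (lim->hours && value / 3600 <= lim->hours) {
		*out_value = value / 3600; *out_unit = 'h';
	} else if (lim->days && value / 86400 <= lim->days) {
		*out_value = value / 86400; *out_unit = 'd';
	} else {
		return -1;	/* no unit can represent this timeout */
	}
	return 0;
}

int main(void)
{
	/* 6-bit timeout fields: each unit holds at most 63. */
	struct limits lim = { 63, 63, 63, 63 };
	int v; char u;

	if (pick_unit(7200, &lim, &v, &u) == 0)	/* two hours, in seconds */
		printf("7200s -> %d%c\n", v, u);	/* prints "7200s -> 2h" */
	return 0;
}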
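Note on the removed token-based brightness path: kbd_led_level_get()/kbd_led_level_set() treat brightness level N as "the N-th set bit of kbd_token_bits", walking the mask with the classic num &= num - 1 trick that clears the lowest set bit. A hypothetical userspace sketch of just that mapping, assuming an invented helper name level_to_token_bit():

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Return the bit index backing brightness "level", or -1 if out of range. */
static int level_to_token_bit(unsigned int token_bits, int level)
{
	unsigned int num = token_bits;

	while (num != 0 && level > 0) {
		num &= num - 1;	/* clear the lowest set bit */
		level--;
	}
	return num ? ffs(num) - 1 : -1;
}

int main(void)
{
	/* Example mask: tokens available at bits 0, 2 and 5. */
	unsigned int token_bits = 0x25;

	for (int level = 0; level < 4; level++)
		printf("level %d -> token bit %d\n",
		       level, level_to_token_bit(token_bits, level));
	return 0;
}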
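Note on the ipr changes above: ipr_wait_for_ops() tags every still-pending command that matches the device with an on-stack completion (eh_comp), and the done path (ipr_scsi_eh_done) signals that completion as each tagged command finishes, so the error handler only returns once the aborted I/O has drained. Below is a minimal pthread-based userspace analogue of that tag-then-wait pattern, with invented names (struct cmd, complete_cmds, outstanding); the kernel version instead uses DECLARE_COMPLETION_ONSTACK(), wait_for_completion_timeout() and the per-hrrq spinlocks, and on timeout it untags the survivors and returns FAILED.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NCMDS 4

struct cmd {
	bool pending;
	bool *done_flag;	/* stands in for ipr_cmd->eh_comp */
};

static struct cmd cmds[NCMDS];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int outstanding;

/* Completion side: what ipr_scsi_eh_done() now does with eh_comp. */
static void *complete_cmds(void *arg)
{
	for (int i = 0; i < NCMDS; i++) {
		usleep(1000);			/* pretend the command ran */
		pthread_mutex_lock(&lock);
		cmds[i].pending = false;
		if (cmds[i].done_flag) {	/* tagged by the waiter? */
			outstanding--;
			pthread_cond_signal(&cond);
		}
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	bool done = false;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < NCMDS; i++) {	/* tag matching pending commands */
		cmds[i].pending = true;
		cmds[i].done_flag = &done;
		outstanding++;
	}
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, complete_cmds, NULL);

	pthread_mutex_lock(&lock);
	while (outstanding > 0)			/* kernel code bounds this with a timeout */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("all matching commands drained\n");
	return 0;
}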