Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/acpi/pci_irq.c | 1
-rw-r--r-- drivers/ata/Kconfig | 1
-rw-r--r-- drivers/ata/ahci.c | 1
-rw-r--r-- drivers/ata/ahci_xgene.c | 14
-rw-r--r-- drivers/ata/libahci.c | 2
-rw-r--r-- drivers/ata/libata-core.c | 36
-rw-r--r-- drivers/ata/libata-eh.c | 1
-rw-r--r-- drivers/ata/libata-scsi.c | 10
-rw-r--r-- drivers/ata/libata-sff.c | 12
-rw-r--r-- drivers/ata/sata_dwc_460ex.c | 26
-rw-r--r-- drivers/ata/sata_sil24.c | 2
-rw-r--r-- drivers/block/nvme-core.c | 2
-rw-r--r-- drivers/block/rbd.c | 25
-rw-r--r-- drivers/bus/mvebu-mbus.c | 13
-rw-r--r-- drivers/clocksource/bcm_kona_timer.c | 9
-rw-r--r-- drivers/clocksource/exynos_mct.c | 4
-rw-r--r-- drivers/clocksource/sh_tmu.c | 2
-rw-r--r-- drivers/gpio/gpio-crystalcove.c | 2
-rw-r--r-- drivers/gpio/gpiolib-of.c | 10
-rw-r--r-- drivers/gpio/gpiolib-sysfs.c | 92
-rw-r--r-- drivers/gpio/gpiolib.c | 58
-rw-r--r-- drivers/gpio/gpiolib.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 80
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 176
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_module.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 32
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 12
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 38
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 12
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 8
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 11
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 52
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 28
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 20
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/cik_sdma.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/ni_dma.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/nid.h | 24
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 42
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 54
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kfd.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 33
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/si_dma.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 39
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 18
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 28
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 36
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2
-rw-r--r-- drivers/hwmon/Kconfig | 10
-rw-r--r-- drivers/hwmon/Makefile | 1
-rw-r--r-- drivers/hwmon/i5500_temp.c | 149
-rw-r--r-- drivers/i2c/busses/Kconfig | 1
-rw-r--r-- drivers/i2c/busses/i2c-s3c2410.c | 23
-rw-r--r-- drivers/i2c/busses/i2c-sh_mobile.c | 12
-rw-r--r-- drivers/i2c/i2c-core.c | 2
-rw-r--r-- drivers/i2c/i2c-slave-eeprom.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 3
-rw-r--r-- drivers/input/mouse/elantech.c | 18
-rw-r--r-- drivers/input/mouse/synaptics.c | 7
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 47
-rw-r--r-- drivers/input/serio/i8042.c | 14
-rw-r--r-- drivers/iommu/Kconfig | 34
-rw-r--r-- drivers/iommu/Makefile | 2
-rw-r--r-- drivers/iommu/amd_iommu.c | 12
-rw-r--r-- drivers/iommu/amd_iommu_v2.c | 29
-rw-r--r-- drivers/iommu/arm-smmu.c | 935
-rw-r--r-- drivers/iommu/fsl_pamu.c | 216
-rw-r--r-- drivers/iommu/fsl_pamu.h | 15
-rw-r--r-- drivers/iommu/fsl_pamu_domain.c | 173
-rw-r--r-- drivers/iommu/io-pgtable-arm.c | 986
-rw-r--r-- drivers/iommu/io-pgtable.c | 82
-rw-r--r-- drivers/iommu/io-pgtable.h | 143
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 674
-rw-r--r-- drivers/iommu/omap-iommu.c | 2
-rw-r--r-- drivers/iommu/tegra-gart.c | 3
-rw-r--r-- drivers/irqchip/irq-atmel-aic-common.c | 4
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 2
-rw-r--r-- drivers/irqchip/irq-hip04.c | 2
-rw-r--r-- drivers/irqchip/irq-mtk-sysirq.c | 4
-rw-r--r-- drivers/irqchip/irq-omap-intc.c | 26
-rw-r--r-- drivers/md/dm-cache-metadata.c | 104
-rw-r--r-- drivers/md/dm-cache-target.c | 89
-rw-r--r-- drivers/md/dm-thin.c | 6
-rw-r--r-- drivers/md/dm.c | 9
-rw-r--r-- drivers/media/pci/cx23885/cx23885-cards.c | 23
-rw-r--r-- drivers/media/pci/cx23885/cx23885-core.c | 4
-rw-r--r-- drivers/media/pci/cx23885/cx23885-dvb.c | 11
-rw-r--r-- drivers/media/pci/cx23885/cx23885.h | 1
-rw-r--r-- drivers/media/platform/omap3isp/ispvideo.c | 7
-rw-r--r-- drivers/media/platform/soc_camera/atmel-isi.c | 5
-rw-r--r-- drivers/media/platform/soc_camera/mx2_camera.c | 3
-rw-r--r-- drivers/media/platform/soc_camera/mx3_camera.c | 3
-rw-r--r-- drivers/media/platform/soc_camera/omap1_camera.c | 3
-rw-r--r-- drivers/media/platform/soc_camera/pxa_camera.c | 3
-rw-r--r-- drivers/media/platform/soc_camera/rcar_vin.c | 4
-rw-r--r-- drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c | 4
-rw-r--r-- drivers/media/usb/dvb-usb/cxusb.c | 2
-rw-r--r-- drivers/media/usb/pvrusb2/pvrusb2-v4l2.c | 24
-rw-r--r-- drivers/media/v4l2-core/videobuf2-core.c | 19
-rw-r--r-- drivers/mfd/da9052-core.c | 3
-rw-r--r-- drivers/mfd/rtsx_usb.c | 12
-rw-r--r-- drivers/mfd/tps65218.c | 12
-rw-r--r-- drivers/net/can/c_can/c_can.c | 3
-rw-r--r-- drivers/net/can/c_can/c_can_platform.c | 29
-rw-r--r-- drivers/net/can/dev.c | 8
-rw-r--r-- drivers/net/can/m_can/m_can.c | 5
-rw-r--r-- drivers/net/can/usb/kvaser_usb.c | 57
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 9
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 41
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 59
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 3
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 11
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 5
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 196
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 1
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 21
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 49
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 96
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 6
-rw-r--r-- drivers/net/usb/r8152.c | 30
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 7
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-fw-file.h | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 7
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 20
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/scan.c | 53
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tx.c | 11
-rw-r--r-- drivers/of/overlay.c | 11
-rw-r--r-- drivers/of/platform.c | 11
-rw-r--r-- drivers/of/unittest-data/tests-overlay.dtsi | 55
-rw-r--r-- drivers/of/unittest.c | 39
-rw-r--r-- drivers/parisc/lba_pci.c | 5
-rw-r--r-- drivers/pci/bus.c | 43
-rw-r--r-- drivers/pci/pci.c | 40
-rw-r--r-- drivers/pci/pci.h | 1
-rw-r--r-- drivers/pci/quirks.c | 14
-rw-r--r-- drivers/pci/setup-bus.c | 56
-rw-r--r-- drivers/pinctrl/core.c | 5
-rw-r--r-- drivers/pinctrl/pinctrl-at91.c | 108
-rw-r--r-- drivers/pinctrl/pinctrl-rockchip.c | 45
-rw-r--r-- drivers/pinctrl/pinctrl-xway.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm.c | 4
-rw-r--r-- drivers/platform/x86/dell-laptop.c | 1055
-rw-r--r-- drivers/regulator/core.c | 4
-rw-r--r-- drivers/regulator/s2mps11.c | 42
-rw-r--r-- drivers/rtc/rtc-s5m.c | 1
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 117
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 220
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 50
-rw-r--r-- drivers/scsi/ipr.c | 92
-rw-r--r-- drivers/scsi/ipr.h | 1
-rw-r--r-- drivers/scsi/scsi.c | 13
-rw-r--r-- drivers/scsi/scsi_debug.c | 4
-rw-r--r-- drivers/scsi/scsi_lib.c | 12
-rw-r--r-- drivers/spi/spi-dw-mid.c | 1
-rw-r--r-- drivers/spi/spi-dw.c | 6
-rw-r--r-- drivers/spi/spi-pxa2xx.c | 2
-rw-r--r-- drivers/spi/spi-sh-msiof.c | 2
-rw-r--r-- drivers/staging/lustre/lustre/llite/vvp_io.c | 2
-rw-r--r-- drivers/staging/media/tlg2300/Kconfig | 1
-rw-r--r-- drivers/staging/nvec/nvec.c | 9
-rw-r--r-- drivers/usb/core/otg_whitelist.h | 5
-rw-r--r-- drivers/usb/core/quirks.c | 4
-rw-r--r-- drivers/usb/dwc2/core_intr.c | 6
-rw-r--r-- drivers/usb/phy/phy.c | 2
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 9
-rw-r--r-- drivers/usb/storage/unusual_uas.h | 7
-rw-r--r-- drivers/watchdog/cadence_wdt.c | 1
-rw-r--r-- drivers/watchdog/imx2_wdt.c | 40
-rw-r--r-- drivers/watchdog/meson_wdt.c | 1
202 files changed, 4572 insertions, 3714 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 694d5a70d6ce..c70d6e45dc10 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
-source "drivers/soc/Kconfig"
-
source "drivers/clk/Kconfig"
source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 5277a0ee5704..b1def411c0b8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -512,7 +512,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- dev->irq = 0;
dev->irq_managed = 0;
}
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index a3a13605a9c4..5f601553b9b0 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -835,6 +835,7 @@ config PATA_AT32
config PATA_AT91
tristate "PATA support for AT91SAM9260"
depends on ARM && SOC_AT91SAM9
+ depends on !ARCH_MULTIPLATFORM
help
This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 49f1e6890587..33bb06e006c9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -325,7 +325,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index feeb8f1e2fe8..cbcd20810355 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -125,10 +125,11 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
* xgene_ahci_qc_issue - Issue commands to the device
* @qc: Command to issue
*
- * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot
- * clear the BSY bit after receiving the PIO setup FIS. This results in the dma
- * state machine goes into the CMFatalErrorUpdate state and locks up. By
- * restarting the dma engine, it removes the controller out of lock up state.
+ * Due to Hardware errata for IDENTIFY DEVICE command and PACKET
+ * command of ATAPI protocol set, the controller cannot clear the BSY bit
+ * after receiving the PIO setup FIS. This results in the DMA state machine
+ * going into the CMFatalErrorUpdate state and locks up. By restarting the
+ * DMA engine, it removes the controller out of lock up state.
*/
static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
{
@@ -137,7 +138,8 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
struct xgene_ahci_context *ctx = hpriv->plat_data;
int rc = 0;
- if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+ if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
+ (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET)))
xgene_ahci_restart_engine(ap);
rc = ahci_qc_issue(qc);
@@ -188,7 +190,7 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
*
* Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
*/
- id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
+ id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 97683e45ab04..61a9c07e0dff 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2003,7 +2003,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
devslp = readl(port_mmio + PORT_DEVSLP);
if (!(devslp & PORT_DEVSLP_DSP)) {
- dev_err(ap->host->dev, "port does not support device sleep\n");
+ dev_info(ap->host->dev, "port does not support device sleep\n");
return;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 5c84fb5c3372..d1a05f9bb91f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4233,10 +4233,33 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* devices that don't properly handle queued TRIM commands */
- { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+ * (Return Zero After Trim) flags in the ATA Command Set are
+ * unreliable in the sense that they only define what happens if
+ * the device successfully executed the DSM TRIM command. TRIM
+ * is only advisory, however, and the device is free to silently
+ * ignore all or parts of the request.
+ *
+ * Whitelist drives that are known to reliably return zeroes
+ * after TRIM.
+ */
+
+ /*
+ * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
+ * that model before whitelisting all other intel SSDs.
+ */
+ { "INTEL*SSDSC2MH*", NULL, 0, },
+
+ { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -4748,7 +4771,10 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
return NULL;
for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
- tag = tag < max_queue ? tag : 0;
+ if (ap->flags & ATA_FLAG_LOWTAG)
+ tag = i;
+ else
+ tag = tag < max_queue ? tag : 0;
/* the last tag is reserved for internal command. */
if (tag == ATA_TAG_INTERNAL)
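[Note: libata compares the IDENTIFY DEVICE model/firmware strings against the patterns above with the kernel's glob_match() helper, and the first matching entry wins, which is why the Intel 510 entry ("INTEL*SSDSC2MH*", no flags) must sit before the blanket "INTEL*SSD*" whitelist line. A minimal user-space sketch of the matching semantics, using fnmatch(3) as a stand-in for glob_match() and a made-up model string:

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	const char *patterns[] = {
		"INTEL*SSDSC2MH*",	/* Intel 510: excluded, no horkage */
		"INTEL*SSD*",		/* other Intel SSDs: ZERO_AFTER_TRIM */
		"Micron_M[56]*",	/* Micron M5xx family */
	};
	const char *model = "INTEL SSDSC2MH120A2";	/* hypothetical */
	int i;

	for (i = 0; i < 3; i++)
		printf("%-18s -> %s\n", patterns[i],
		       fnmatch(patterns[i], model, 0) ? "no match" : "match");
	return 0;
}

Both Intel patterns match this model, so only the table order keeps the buggy 510 from inheriting ATA_HORKAGE_ZERO_AFTER_TRIM.]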
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3dbec8954c86..8d00c2638bed 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2389,6 +2389,7 @@ const char *ata_get_cmd_descript(u8 command)
return NULL;
}
+EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
/**
* ata_eh_link_report - report error handling to user
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e364e86e84d7..6abd17a85b13 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2532,13 +2532,15 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[15] = lowest_aligned;
if (ata_id_has_trim(args->id)) {
- rbuf[14] |= 0x80; /* TPE */
+ rbuf[14] |= 0x80; /* LBPME */
- if (ata_id_has_zero_after_trim(args->id))
- rbuf[14] |= 0x40; /* TPRZ */
+ if (ata_id_has_zero_after_trim(args->id) &&
+ dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
+ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+ rbuf[14] |= 0x40; /* LBPRZ */
+ }
}
}
-
return 0;
}
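[Note: the bits set in this hunk land in byte 14 of the READ CAPACITY(16) response defined by SBC: bit 7 is LBPME (logical block provisioning enabled) and bit 6 is LBPRZ (unmapped blocks read back as zeroes); the SCSI disk layer uses the latter to advertise discard_zeroes_data. A decoding sketch for illustration (the helper name is made up):

#include <stdbool.h>
#include <stdint.h>

struct lbp_bits {
	bool lbpme;	/* provisioning (TRIM/UNMAP) usable */
	bool lbprz;	/* reads of unmapped LBAs return zeroes */
};

/* rbuf: the 32-byte READ CAPACITY(16) parameter data. */
static struct lbp_bits decode_lbp(const uint8_t *rbuf)
{
	struct lbp_bits b = {
		.lbpme = rbuf[14] & 0x80,
		.lbprz = rbuf[14] & 0x40,
	};
	return b;
}]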
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index db90aa35cb71..2e86e3b85266 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
DPRINTK("ENTER\n");
cancel_delayed_work_sync(&ap->sff_pio_task);
+
+ /*
+ * We wanna reset the HSM state to IDLE. If we do so without
+ * grabbing the port lock, critical sections protected by it which
+ * expect the HSM state to stay stable may get surprised. For
+ * example, we may set IDLE in between the time
+ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
+ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
+ */
+ spin_lock_irq(ap->lock);
ap->hsm_task_state = HSM_ST_IDLE;
+ spin_unlock_irq(ap->lock);
+
ap->sff_pio_task_link = NULL;
if (ata_msg_ctl(ap))
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index c7ddef89e7b0..8e8248179d20 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
if (err) {
dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
" %d\n", __func__, err);
- goto error_out;
+ return err;
}
/* Enabe DMA */
@@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
sata_dma_regs);
return 0;
-
-error_out:
- dma_dwc_exit(hsdev);
-
- return err;
}
static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
@@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
char *ver = (char *)&versionr;
u8 *base = NULL;
int err = 0;
- int irq, rc;
+ int irq;
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
if (irq == NO_IRQ) {
dev_err(&ofdev->dev, "no SATA DMA irq\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Get physical SATA DMA register base address */
@@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
" address\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Save dev for later use in dev_xxx() routines */
host_pvt.dwc_dev = &ofdev->dev;
/* Initialize AHB DMAC */
- dma_dwc_init(hsdev, irq);
+ err = dma_dwc_init(hsdev, irq);
+ if (err)
+ goto error_dma_iomap;
/* Enable SATA Interrupts */
sata_dwc_enable_interrupts(hsdev);
@@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
* device discovery process, invoking our port_start() handler &
* error_handler() to execute a dummy Softreset EH session
*/
- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
-
- if (rc != 0)
+ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+ if (err)
dev_err(&ofdev->dev, "failed to activate host");
dev_set_drvdata(&ofdev->dev, host);
@@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
error_out:
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
-
+error_dma_iomap:
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
error_iomap:
iounmap(base);
error_kmalloc:
@@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
iounmap(hsdev->reg_base);
kfree(hsdev);
kfree(host);
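[Note: the label shuffle above follows the standard kernel unwind idiom: every acquisition gets a matching cleanup label, a failure jumps to the label for the most recent successful step, and teardown runs in reverse order. The bug fixed here was that the DMA register mapping leaked on some failure paths. A user-space sketch of the ordering, with made-up names:

#include <stdlib.h>

struct ctx { void *regs, *dma_regs; };

static int example_start(struct ctx *c) { (void)c; return 0; }

static int example_probe(struct ctx *c)
{
	c->regs = malloc(64);		/* stands in for the first ioremap */
	if (!c->regs)
		return -1;

	c->dma_regs = malloc(64);	/* stands in for the DMA ioremap */
	if (!c->dma_regs)
		goto err_free_regs;

	if (example_start(c))		/* stands in for dma_dwc_init() */
		goto err_free_dma_regs;

	return 0;

err_free_dma_regs:
	free(c->dma_regs);
err_free_regs:
	free(c->regs);
	return -1;
}]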
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index d81b20ddb527..ea655949023f 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -246,7 +246,7 @@ enum {
/* host flags */
SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
- ATA_FLAG_AN | ATA_FLAG_PMP,
+ ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG,
SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
IRQ_STAT_4PORTS = 0xf,
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index cb529e9a82dd..d826bf3e62c8 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -106,7 +106,7 @@ struct nvme_queue {
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
u16 q_depth;
- u16 cq_vector;
+ s16 cq_vector;
u16 sq_head;
u16 sq_tail;
u16 cq_head;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3ec85dfce124..8a86b62466f7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
* If an image has a non-zero parent overlap, get a reference to its
* parent.
*
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened. We
- * drop it again if there is no overlap.
- *
* Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or
* false otherwise.
*/
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
- int counter;
+ int counter = 0;
if (!rbd_dev->parent_spec)
return false;
- counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
- if (counter > 0 && rbd_dev->parent_overlap)
- return true;
-
- /* Image was flattened, but parent is not yet torn down */
+ down_read(&rbd_dev->header_rwsem);
+ if (rbd_dev->parent_overlap)
+ counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+ up_read(&rbd_dev->header_rwsem);
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");
- return false;
+ return counter > 0;
}
/*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
- smp_mb();
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* treat it specially.
*/
rbd_dev->parent_overlap = overlap;
- smp_mb();
if (!overlap) {
/* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
- /* Drop parent reference unless it's already been done (or none) */
-
- if (rbd_dev->parent_overlap)
- rbd_dev_parent_put(rbd_dev);
+ rbd_dev_parent_put(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index eb7682dc123b..81bf297f1034 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
}
/* Checks whether the given window number is available */
+
+/* On Armada XP, 375 and 38x the MBus window 13 has the remap
+ * capability, like windows 0 to 7. However, the mvebu-mbus driver
+ * isn't currently taking into account this special case, which means
+ * that when window 13 is actually used, the remap registers are left
+ * to 0, making the device using this MBus window unavailable. The
+ * quick fix for stable is to not use window 13. A follow up patch
+ * will correctly handle this window.
+*/
static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
const int win)
{
void __iomem *addr = mbus->mbuswins_base +
mbus->soc->win_cfg_offset(win);
u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+ if (win == 13)
+ return false;
+
return !(ctrl & WIN_CTRL_ENABLE);
}
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 0595dc6c453e..f1e33d08dd83 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base)
}
static void
-kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
+kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
{
- void __iomem *base = IOMEM(timer_base);
int loop_limit = 4;
/*
@@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
*/
while (--loop_limit) {
- *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET);
- *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET);
- if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET))
+ *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
+ *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
+ if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
break;
}
if (!loop_limit) {
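[Note: the loop above is the usual recipe for sampling a 64-bit free-running counter exposed as two 32-bit registers: read high, read low, and retry if high changed in between (the low word rolled over mid-read). A user-space model of the same idea, with register access simulated by volatile pointers:

#include <stdint.h>

static uint64_t read_split_counter(volatile uint32_t *hi_reg,
				   volatile uint32_t *lo_reg)
{
	uint32_t hi, lo;
	int loop_limit = 4;	/* bail out on a runaway counter */

	do {
		hi = *hi_reg;
		lo = *lo_reg;
	} while (hi != *hi_reg && --loop_limit);

	return ((uint64_t)hi << 32) | lo;
}]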
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 9403061a2acc..83564c9cfdbe 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
writel_relaxed(value, reg_base + offset);
if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
- switch (offset & EXYNOS4_MCT_L_MASK) {
+ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+ switch (offset & ~EXYNOS4_MCT_L_MASK) {
case MCT_L_TCON_OFFSET:
mask = 1 << 3; /* L_TCON write status */
break;
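[Note: the swap matters because EXYNOS4_MCT_L_MASK covers the high, per-channel base bits of the register offset. The write-status register lives at the channel base plus MCT_L_WSTAT_OFFSET, so its address needs offset & mask, while the switch must dispatch on the low sub-register bits, offset & ~mask; the old code had the two halves reversed. A worked example with an assumed mask of 0xffffff00 and hypothetical offsets:

#include <stdint.h>
#include <stdio.h>

#define MCT_L_MASK	0xffffff00u	/* assumed per-channel base mask */
#define MCT_L_WSTAT	0x40u		/* hypothetical write-status offset */

int main(void)
{
	uint32_t offset = 0x320;	/* channel base 0x300, TCON at 0x20 */

	printf("stat_addr    = 0x%x\n", (offset & MCT_L_MASK) + MCT_L_WSTAT);
	printf("sub-register = 0x%x\n", offset & ~MCT_L_MASK);
	return 0;
}]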
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 0f665b8f2461..f150ca82bfaf 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 200;
- ced->cpumask = cpumask_of(0);
+ ced->cpumask = cpu_possible_mask;
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
ced->suspend = sh_tmu_clock_event_suspend;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 55d4803d71b0..3d9e08f7e823 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
if (pending & BIT(gpio)) {
virq = irq_find_mapping(cg->chip.irqdomain, gpio);
- generic_handle_irq(virq);
+ handle_nested_irq(virq);
}
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 604dbe60bdee..08261f2b3a82 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
return false;
ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
- if (ret < 0)
- return false;
+ if (ret < 0) {
+ /* We've found the gpio chip, but the translation failed.
+ * Return true to stop looking and return the translation
+ * error via out_gpio
+ */
+ gg_data->out_gpio = ERR_PTR(ret);
+ return true;
+ }
gg_data->out_gpio = gpiochip_get_desc(gc, ret);
return true;
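[Note: stashing ERR_PTR(ret) in out_gpio lets the chip iterator report "chip found, but translation failed" through the same pointer that normally carries the descriptor. For reference, a user-space model of the kernel's ERR_PTR encoding (the top 4095 pointer values are reserved for negative errnos):

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *desc = ERR_PTR(-22);	/* e.g. a failed of_xlate, -EINVAL */

	if (IS_ERR(desc))
		printf("translation error: %ld\n", PTR_ERR(desc));
	return 0;
}]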
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 2ac1800b58bb..f62aa115d79a 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev,
return status;
}
-static const DEVICE_ATTR(value, 0644,
+static DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -353,17 +353,46 @@ static ssize_t gpio_active_low_store(struct device *dev,
return status ? : size;
}
-static const DEVICE_ATTR(active_low, 0644,
+static DEVICE_ATTR(active_low, 0644,
gpio_active_low_show, gpio_active_low_store);
-static const struct attribute *gpio_attrs[] = {
+static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+ bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags);
+
+ if (attr == &dev_attr_direction.attr) {
+ if (!show_direction)
+ mode = 0;
+ } else if (attr == &dev_attr_edge.attr) {
+ if (gpiod_to_irq(desc) < 0)
+ mode = 0;
+ if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute *gpio_attrs[] = {
+ &dev_attr_direction.attr,
+ &dev_attr_edge.attr,
&dev_attr_value.attr,
&dev_attr_active_low.attr,
NULL,
};
-static const struct attribute_group gpio_attr_group = {
- .attrs = (struct attribute **) gpio_attrs,
+static const struct attribute_group gpio_group = {
+ .attrs = gpio_attrs,
+ .is_visible = gpio_is_visible,
+};
+
+static const struct attribute_group *gpio_groups[] = {
+ &gpio_group,
+ NULL
};
/*
@@ -400,16 +429,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
}
static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
-static const struct attribute *gpiochip_attrs[] = {
+static struct attribute *gpiochip_attrs[] = {
&dev_attr_base.attr,
&dev_attr_label.attr,
&dev_attr_ngpio.attr,
NULL,
};
-
-static const struct attribute_group gpiochip_attr_group = {
- .attrs = (struct attribute **) gpiochip_attrs,
-};
+ATTRIBUTE_GROUPS(gpiochip);
/*
* /sys/class/gpio/export ... write-only
@@ -556,45 +582,30 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
goto fail_unlock;
}
- if (!desc->chip->direction_input || !desc->chip->direction_output)
- direction_may_change = false;
+ if (desc->chip->direction_input && desc->chip->direction_output &&
+ direction_may_change) {
+ set_bit(FLAG_SYSFS_DIR, &desc->flags);
+ }
+
spin_unlock_irqrestore(&gpio_lock, flags);
offset = gpio_chip_hwgpio(desc);
if (desc->chip->names && desc->chip->names[offset])
ioname = desc->chip->names[offset];
- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%u",
- desc_to_gpio(desc));
+ dev = device_create_with_groups(&gpio_class, desc->chip->dev,
+ MKDEV(0, 0), desc, gpio_groups,
+ ioname ? ioname : "gpio%u",
+ desc_to_gpio(desc));
if (IS_ERR(dev)) {
status = PTR_ERR(dev);
goto fail_unlock;
}
- status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
- if (status)
- goto fail_unregister_device;
-
- if (direction_may_change) {
- status = device_create_file(dev, &dev_attr_direction);
- if (status)
- goto fail_unregister_device;
- }
-
- if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
- !test_bit(FLAG_IS_OUT, &desc->flags))) {
- status = device_create_file(dev, &dev_attr_edge);
- if (status)
- goto fail_unregister_device;
- }
-
set_bit(FLAG_EXPORT, &desc->flags);
mutex_unlock(&sysfs_lock);
return 0;
-fail_unregister_device:
- device_unregister(dev);
fail_unlock:
mutex_unlock(&sysfs_lock);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
@@ -718,6 +729,7 @@ void gpiod_unexport(struct gpio_desc *desc)
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev) {
gpio_setup_irq(desc, dev, 0);
+ clear_bit(FLAG_SYSFS_DIR, &desc->flags);
clear_bit(FLAG_EXPORT, &desc->flags);
} else
status = -ENODEV;
@@ -750,13 +762,13 @@ int gpiochip_export(struct gpio_chip *chip)
/* use chip->base for the ID; it's already known to be unique */
mutex_lock(&sysfs_lock);
- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
- "gpiochip%d", chip->base);
- if (!IS_ERR(dev)) {
- status = sysfs_create_group(&dev->kobj,
- &gpiochip_attr_group);
- } else
+ dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
+ chip, gpiochip_groups,
+ "gpiochip%d", chip->base);
+ if (IS_ERR(dev))
status = PTR_ERR(dev);
+ else
+ status = 0;
chip->exported = (status == 0);
mutex_unlock(&sysfs_lock);
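[Note: both hunks in this file move from "create the device, then bolt attributes on one by one" to the attribute-group idiom: declare every attribute statically, hide the conditional ones from an .is_visible callback, and pass the groups to device_create_with_groups() so the device shows up in sysfs fully formed, with no window where udev can see a half-populated device and no partial-creation unwinding. A minimal sketch of the idiom with made-up driver names:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static bool foo_supports_mode(struct device *dev) { return false; }

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ok\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "auto\n");
}
static DEVICE_ATTR_RO(mode);

static umode_t foo_attr_visible(struct kobject *kobj,
				struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	/* Hide the conditional attribute instead of creating it later. */
	if (attr == &dev_attr_mode.attr && !foo_supports_mode(dev))
		return 0;
	return attr->mode;
}

static struct attribute *foo_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_mode.attr,
	NULL,
};

static const struct attribute_group foo_group = {
	.attrs = foo_attrs,
	.is_visible = foo_attr_visible,
};
static const struct attribute_group *foo_groups[] = { &foo_group, NULL };

/* Then: device_create_with_groups(class, parent, MKDEV(0, 0), drvdata,
 *				   foo_groups, "foo%d", id); */]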
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 487afe6f22fc..568aa2b6bdb0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -248,29 +248,30 @@ int gpiochip_add(struct gpio_chip *chip)
base = gpiochip_find_base(chip->ngpio);
if (base < 0) {
status = base;
- goto unlock;
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ goto err_free_descs;
}
chip->base = base;
}
status = gpiochip_add_to_list(chip);
+ if (status) {
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ goto err_free_descs;
+ }
- if (status == 0) {
- for (id = 0; id < chip->ngpio; id++) {
- struct gpio_desc *desc = &descs[id];
- desc->chip = chip;
-
- /* REVISIT: most hardware initializes GPIOs as
- * inputs (often with pullups enabled) so power
- * usage is minimized. Linux code should set the
- * gpio direction first thing; but until it does,
- * and in case chip->get_direction is not set,
- * we may expose the wrong direction in sysfs.
- */
- desc->flags = !chip->direction_input
- ? (1 << FLAG_IS_OUT)
- : 0;
- }
+ for (id = 0; id < chip->ngpio; id++) {
+ struct gpio_desc *desc = &descs[id];
+
+ desc->chip = chip;
+
+ /* REVISIT: most hardware initializes GPIOs as inputs (often
+ * with pullups enabled) so power usage is minimized. Linux
+ * code should set the gpio direction first thing; but until
+ * it does, and in case chip->get_direction is not set, we may
+ * expose the wrong direction in sysfs.
+ */
+ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
chip->desc = descs;
@@ -284,12 +285,9 @@ int gpiochip_add(struct gpio_chip *chip)
of_gpiochip_add(chip);
acpi_gpiochip_add(chip);
- if (status)
- goto fail;
-
status = gpiochip_export(chip);
if (status)
- goto fail;
+ goto err_remove_chip;
pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
chip->base, chip->base + chip->ngpio - 1,
@@ -297,11 +295,15 @@ int gpiochip_add(struct gpio_chip *chip)
return 0;
-unlock:
+err_remove_chip:
+ acpi_gpiochip_remove(chip);
+ of_gpiochip_remove(chip);
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
-fail:
- kfree(descs);
chip->desc = NULL;
+err_free_descs:
+ kfree(descs);
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
@@ -325,14 +327,15 @@ void gpiochip_remove(struct gpio_chip *chip)
unsigned long flags;
unsigned id;
- acpi_gpiochip_remove(chip);
-
- spin_lock_irqsave(&gpio_lock, flags);
+ gpiochip_unexport(chip);
gpiochip_irqchip_remove(chip);
+
+ acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
+ spin_lock_irqsave(&gpio_lock, flags);
for (id = 0; id < chip->ngpio; id++) {
if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags))
dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
@@ -342,7 +345,6 @@ void gpiochip_remove(struct gpio_chip *chip)
list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
- gpiochip_unexport(chip);
kfree(chip->desc);
chip->desc = NULL;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index e3a52113a541..550a5eafbd38 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -77,6 +77,7 @@ struct gpio_desc {
#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */
#define ID_SHIFT 16 /* add new flags before this one */
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index be6246de5091..307a309110e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -8,7 +8,6 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
kfd_process.o kfd_queue.o kfd_mqd_manager.o \
kfd_kernel_queue.o kfd_packet_manager.o \
- kfd_process_queue_manager.o kfd_device_queue_manager.o \
- kfd_interrupt.o
+ kfd_process_queue_manager.o kfd_device_queue_manager.o
obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 43884ebd4303..25bc47f3c1cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
#define MQD_SIZE_ALIGNED 768
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
/* calculate max size of mqds needed for queues */
- size = max_num_of_processes *
- max_num_of_queues_per_process *
- kfd->device_info->mqd_size_aligned;
+ size = max_num_of_queues_per_device *
+ kfd->device_info->mqd_size_aligned;
/* add another 512KB for all other allocations on gart */
size += 512 * 1024;
@@ -192,13 +192,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_topology_add_device_error;
}
- if (kfd_interrupt_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing interrupts for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
- goto kfd_interrupt_error;
- }
-
if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
"Error initializing iommuv2 for device (%x:%x)\n",
@@ -237,8 +230,6 @@ dqm_start_error:
device_queue_manager_error:
amd_iommu_free_device(kfd->pdev);
device_iommu_pasid_error:
- kfd_interrupt_exit(kfd);
-kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
kfd2kgd->fini_sa_manager(kfd->kgd);
@@ -254,7 +245,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
if (kfd->init_complete) {
device_queue_manager_uninit(kfd->dqm);
amd_iommu_free_device(kfd->pdev);
- kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
}
@@ -296,13 +286,5 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
- if (kfd->init_complete) {
- spin_lock(&kfd->interrupt_lock);
-
- if (kfd->interrupts_active
- && enqueue_ih_ring_entry(kfd, ih_ring_entry))
- schedule_work(&kfd->interrupt_work);
-
- spin_unlock(&kfd->interrupt_lock);
- }
+ /* Process interrupts / schedule work as necessary */
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 9c8961d22360..0d8694f015c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
if (retval != 0) {
@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
list_add(&q->list, &qpd->queues_list);
dqm->queue_count++;
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
mutex_unlock(&dqm->lock);
return 0;
}
@@ -280,7 +295,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
q->queue);
retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
- q->queue, q->properties.write_ptr);
+ q->queue, (uint32_t __user *) q->properties.write_ptr);
if (retval != 0) {
deallocate_hqd(dqm, q);
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
dqm->queue_count--;
+
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
for (i = 0; i < pipes_num; i++) {
inx = i + first_pipe;
+ /*
+ * HPD buffer on GTT is allocated by amdkfd, no need to waste
+ * space in GTT for pipelines we don't initialize
+ */
pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
/* = log2(bytes/4)-1 */
- kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+ kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
}
@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
pr_debug("kfd: In %s\n", __func__);
- retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+ retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
if (retval != 0)
return retval;
@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In func %s\n", __func__);
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++;
qpd->is_debug = true;
@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count--;
qpd->is_debug = false;
execute_queues_cpsch(dqm, false);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type.
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
mutex_unlock(&dqm->lock);
}
@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ retval = -EPERM;
+ goto out;
+ }
+
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = execute_queues_cpsch(dqm, false);
}
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
mutex_unlock(&dqm->lock);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c3f189e8ae35..52035bf0c1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -130,6 +130,7 @@ struct device_queue_manager {
struct list_head queues;
unsigned int processes_count;
unsigned int queue_count;
+ unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
unsigned int vmid_bitmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
deleted file mode 100644
index 5b999095a1f7..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * KFD Interrupts.
- *
- * AMD GPUs deliver interrupts by pushing an interrupt description onto the
- * interrupt ring and then sending an interrupt. KGD receives the interrupt
- * in ISR and sends us a pointer to each new entry on the interrupt ring.
- *
- * We generally can't process interrupt-signaled events from ISR, so we call
- * out to each interrupt client module (currently only the scheduler) to ask if
- * each interrupt is interesting. If they return true, then it requires further
- * processing so we copy it to an internal interrupt ring and call each
- * interrupt client again from a work-queue.
- *
- * There's no acknowledgment for the interrupts we use. The hardware simply
- * queues a new interrupt each time without waiting.
- *
- * The fixed-size internal queue means that it's possible for us to lose
- * interrupts because we have no back-pressure to the hardware.
- */
-
-#include <linux/slab.h>
-#include <linux/device.h>
-#include "kfd_priv.h"
-
-#define KFD_INTERRUPT_RING_SIZE 256
-
-static void interrupt_wq(struct work_struct *);
-
-int kfd_interrupt_init(struct kfd_dev *kfd)
-{
- void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
- kfd->device_info->ih_ring_entry_size,
- GFP_KERNEL);
- if (!interrupt_ring)
- return -ENOMEM;
-
- kfd->interrupt_ring = interrupt_ring;
- kfd->interrupt_ring_size =
- KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
- atomic_set(&kfd->interrupt_ring_wptr, 0);
- atomic_set(&kfd->interrupt_ring_rptr, 0);
-
- spin_lock_init(&kfd->interrupt_lock);
-
- INIT_WORK(&kfd->interrupt_work, interrupt_wq);
-
- kfd->interrupts_active = true;
-
- /*
- * After this function returns, the interrupt will be enabled. This
- * barrier ensures that the interrupt running on a different processor
- * sees all the above writes.
- */
- smp_wmb();
-
- return 0;
-}
-
-void kfd_interrupt_exit(struct kfd_dev *kfd)
-{
- /*
- * Stop the interrupt handler from writing to the ring and scheduling
- * workqueue items. The spinlock ensures that any interrupt running
- * after we have unlocked sees interrupts_active = false.
- */
- unsigned long flags;
-
- spin_lock_irqsave(&kfd->interrupt_lock, flags);
- kfd->interrupts_active = false;
- spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
-
- /*
- * Flush_scheduled_work ensures that there are no outstanding
- * work-queue items that will access interrupt_ring. New work items
- * can't be created because we stopped interrupt handling above.
- */
- flush_scheduled_work();
-
- kfree(kfd->interrupt_ring);
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with dequeue_ih_ring_entry.
- */
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
-
- if ((rptr - wptr) % kfd->interrupt_ring_size ==
- kfd->device_info->ih_ring_entry_size) {
- /* This is very bad, the system is likely to hang. */
- dev_err_ratelimited(kfd_chardev(),
- "Interrupt ring overflow, dropping interrupt.\n");
- return false;
- }
-
- memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
-
- wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
- smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
- atomic_set(&kfd->interrupt_ring_wptr, wptr);
-
- return true;
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with enqueue_ih_ring_entry.
- */
-static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
-{
- /*
- * Assume that wait queues have an implicit barrier, i.e. anything that
- * happened in the ISR before it queued work is visible.
- */
-
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
-
- if (rptr == wptr)
- return false;
-
- memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
- kfd->device_info->ih_ring_entry_size);
-
- rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
-
- /*
- * Ensure the rptr write update is not visible until
- * memcpy has finished reading.
- */
- smp_mb();
- atomic_set(&kfd->interrupt_ring_rptr, rptr);
-
- return true;
-}
-
-static void interrupt_wq(struct work_struct *work)
-{
- struct kfd_dev *dev = container_of(work, struct kfd_dev,
- interrupt_work);
-
- uint32_t ih_ring_entry[DIV_ROUND_UP(
- dev->device_info->ih_ring_entry_size,
- sizeof(uint32_t))];
-
- while (dequeue_ih_ring_entry(dev, ih_ring_entry))
- ;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 95d5af138e6e..a8be6df85347 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Kernel cmdline parameter that defines the amdkfd scheduling policy");
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+ "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
bool kgd2kfd_init(unsigned interface_version,
const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
}
/* Verify module parameters */
- if ((max_num_of_processes < 0) ||
- (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
- pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
- return -1;
- }
-
- if ((max_num_of_queues_per_process < 0) ||
- (max_num_of_queues_per_process >
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
- pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+ if ((max_num_of_queues_per_device < 0) ||
+ (max_num_of_queues_per_device >
+ KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+ pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 4c25ef504f79..6cfe7f1f18cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
int kfd_pasid_init(void)
{
- pasid_limit = max_num_of_processes;
+ pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index a5edb29507e3..96dc10e8904a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -52,20 +52,19 @@
#define kfd_alloc_struct(ptr_to_struct) \
((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
#define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
*/
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
+ (KFD_MAX_NUM_OF_PROCESSES * \
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
#define KFD_KERNEL_QUEUE_SIZE 2048
@@ -135,22 +134,10 @@ struct kfd_dev {
struct kgd2kfd_shared_resources shared_resources;
- void *interrupt_ring;
- size_t interrupt_ring_size;
- atomic_t interrupt_ring_rptr;
- atomic_t interrupt_ring_wptr;
- struct work_struct interrupt_work;
- spinlock_t interrupt_lock;
-
/* QCM Device instance */
struct device_queue_manager *dqm;
bool init_complete;
- /*
- * Interrupts of interest to KFD are copied
- * from the HW ring into a SW ring.
- */
- bool interrupts_active;
};
/* KGD2KFD callbacks */
@@ -531,10 +518,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
/* Interrupts */
-int kfd_interrupt_init(struct kfd_dev *dev);
-void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 47526780d736..f37cf5efe642 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("kfd: in %s\n", __func__);
found = find_first_zero_bit(pqm->queue_slot_bitmap,
- max_num_of_queues_per_process);
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
pr_debug("kfd: the new slot id %lu\n", found);
- if (found >= max_num_of_queues_per_process) {
+ if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
pqm->process->pasid);
return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap =
- kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+ kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
if (pqm->queue_slot_bitmap == NULL)
return -ENOMEM;
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->kq = NULL;
retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
&q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("kfd: error dqm create queue\n");
+ pr_debug("Error dqm create queue\n");
goto err_create_queue;
}
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
err_create_queue:
kfree(pqn);
err_allocate_pqn:
+ /* check if queues list is empty unregister process from device */
clear_bit(*qid, pqm->queue_slot_bitmap);
+ if (list_empty(&pqm->queues))
+ dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
return retval;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 52ce26d6b4fb..dc386ebe5193 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+static void remove_from_modeset(struct drm_mode_set *set,
+ struct drm_connector *connector)
+{
+ int i, j;
+
+ for (i = 0; i < set->num_connectors; i++) {
+ if (set->connectors[i] == connector)
+ break;
+ }
+
+ if (i == set->num_connectors)
+ return;
+
+ for (j = i + 1; j < set->num_connectors; j++) {
+ set->connectors[j - 1] = set->connectors[j];
+ }
+ set->num_connectors--;
+
+ /* because i915 is pissy about this..
+ * TODO: maybe we need to make sure we set it back to !=NULL somewhere?
+ */
+ if (set->num_connectors == 0)
+ set->fb = NULL;
+}
+
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
}
fb_helper->connector_count--;
kfree(fb_helper_connector);
+
+ /* also cleanup dangling references to the connector: */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
@@ -741,7 +771,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
int i, j, rc = 0;
int start;
- drm_modeset_lock_all(dev);
+ if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ return -EBUSY;
+ }
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
@@ -915,7 +947,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- drm_modeset_lock_all(dev);
+ if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ return -EBUSY;
+ }
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
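Both setcmap and pan_display now take the modeset locks through __drm_modeset_lock_all(dev, trylock). When oops_in_progress is set (the kernel is panicking onto the fbdev console), blocking on a contended lock could wedge the panic path, so the helpers only try-lock and bail out with -EBUSY. A rough sketch of that shape, assuming a single mutex stands in for the real per-device lock set (example_lock_all is hypothetical; the real helper lives in drm_modeset_lock.c):

    /* block normally, but never block when called from the oops path */
    static int example_lock_all(struct drm_device *dev, bool trylock)
    {
            if (trylock)
                    return mutex_trylock(&dev->mode_config.mutex) ? 0 : -EBUSY;
            mutex_lock(&dev->mode_config.mutex);
            return 0;
    }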
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 121470a83d1a..1bcbe07cecfc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -645,18 +645,6 @@ static int exynos_drm_init(void)
if (!is_exynos)
return -ENODEV;
- /*
- * Register device object only in case of Exynos SoC.
- *
- * Below codes resolves temporarily infinite loop issue incurred
- * by Exynos drm driver when using multi-platform kernel.
- * So these codes will be replaced with more generic way later.
- */
- if (!of_machine_is_compatible("samsung,exynos3") &&
- !of_machine_is_compatible("samsung,exynos4") &&
- !of_machine_is_compatible("samsung,exynos5"))
- return -ENODEV;
-
exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
NULL, 0);
if (IS_ERR(exynos_drm_pdev))
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 5765a161abdd..98051e8e855a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1669,7 +1669,6 @@ static void hdmi_mode_apply(struct hdmi_context *hdata)
static void hdmiphy_conf_reset(struct hdmi_context *hdata)
{
- u8 buffer[2];
u32 reg;
clk_disable_unprepare(hdata->res.sclk_hdmi);
@@ -1677,11 +1676,8 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
clk_prepare_enable(hdata->res.sclk_hdmi);
/* operation mode */
- buffer[0] = 0x1f;
- buffer[1] = 0x00;
-
- if (hdata->hdmiphy_port)
- i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_ENABLE_MODE_SET);
if (hdata->type == HDMI_TYPE13)
reg = HDMI_V13_PHY_RSTOUT;
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 820b76234ef4..064ed6597def 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1026,6 +1026,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
{
struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+ int err;
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
@@ -1034,7 +1035,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
}
mutex_unlock(&mixer_ctx->mixer_mutex);
- drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+ err = drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+ if (err < 0) {
+ DRM_DEBUG_KMS("failed to acquire vblank counter\n");
+ return;
+ }
atomic_set(&mixer_ctx->wait_vsync_event, 1);
@@ -1262,8 +1267,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
return ret;
}
- pm_runtime_enable(dev);
-
return 0;
}
@@ -1272,8 +1275,6 @@ static void mixer_unbind(struct device *dev, struct device *master, void *data)
struct mixer_context *ctx = dev_get_drvdata(dev);
mixer_mgr_remove(&ctx->manager);
-
- pm_runtime_disable(dev);
}
static const struct component_ops mixer_component_ops = {
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index d4762799351d..a9041d1a8ff0 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -32,6 +32,8 @@
struct tda998x_priv {
struct i2c_client *cec;
struct i2c_client *hdmi;
+ struct mutex mutex;
+ struct delayed_work dwork;
uint16_t rev;
uint8_t current_page;
int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
uint8_t addr = REG2ADDR(reg);
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return ret;
+ goto out;
ret = i2c_master_send(client, &addr, sizeof(addr));
if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
if (ret < 0)
goto fail;
- return ret;
+ goto out;
fail:
dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
return ret;
}
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, cnt + 1);
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
uint8_t buf[] = {REG2ADDR(reg), val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
}
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tda998x_priv *priv =
+ container_of(dwork, struct tda998x_priv, dwork);
+
+ if (priv->encoder && priv->encoder->dev)
+ drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
/*
* only 2 interrupts may occur: screen plug/unplug and EDID read
*/
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
priv->wq_edid_wait = 0;
wake_up(&priv->wq_edid);
} else if (cec != 0) { /* HPD change */
- if (priv->encoder && priv->encoder->dev)
- drm_helper_hpd_irq_event(priv->encoder->dev);
+ schedule_delayed_work(&priv->dwork, HZ/10);
}
return IRQ_HANDLED;
}
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
- if (priv->hdmi->irq)
+ if (priv->hdmi->irq) {
free_irq(priv->hdmi->irq, priv);
+ cancel_delayed_work_sync(&priv->dwork);
+ }
i2c_unregister_device(priv->cec);
}
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
+ unsigned short cec_addr;
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->current_page = 0xff;
priv->hdmi = client;
- priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+ cec_addr = 0x34 + (client->addr & 0x03);
+ priv->cec = i2c_new_dummy(client->adapter, cec_addr);
if (!priv->cec)
return -ENODEV;
priv->dpms = DRM_MODE_DPMS_OFF;
+ mutex_init(&priv->mutex); /* protect the page access */
+
/* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
if (client->irq) {
int irqf_trigger;
- /* init read EDID waitqueue */
+ /* init read EDID waitqueue and HPD work */
init_waitqueue_head(&priv->wq_edid);
+ INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
/* clear pending interrupts */
reg_read(priv, REG_INT_FLAGS_0);
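Two separate races are closed in this file. Every paged register accessor now holds priv->mutex across its set_page()/transfer pair, so a concurrent caller can no longer flip the chip's register page between another caller's page select and its data bytes. And the threaded IRQ handler no longer calls the hotplug helper directly: HPD changes are deferred to delayed work, spacing processing out by roughly 100 ms (HZ/10), with cancel_delayed_work_sync() added on the matching teardown path. The deferral pattern in isolation, sketched with standalone hypothetical names:

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    static void hpd_worker(struct work_struct *work)
    {
            /* process context, ~100 ms after the IRQ; safe to take
             * mutexes and call the DRM hotplug helpers from here */
    }

    static DECLARE_DELAYED_WORK(hpd_work, hpd_worker);

    static irqreturn_t hpd_irq(int irq, void *data)
    {
            schedule_delayed_work(&hpd_work, HZ / 10);
            return IRQ_HANDLED;
    }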
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 574057cd1d09..7643300828c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(IS_HSW_ULT(dev));
- } else if (IS_BROADWELL(dev)) {
- dev_priv->pch_type = PCH_LPT;
- dev_priv->pch_id =
- INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- DRM_DEBUG_KMS("This is Broadwell, assuming "
- "LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e9f891c432f8..9d7a7155bf02 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table {
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
- (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c11603b4cf1d..5f614828d365 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
+ /* Adjust fence size to match tiled area */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ uint32_t row_size = obj->stride *
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+ size = (size / row_size) * row_size;
+ }
+
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
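The clamp above rounds the fence window down to a whole number of tile rows so it never covers bytes past the tiled area. One row spans stride bytes times the tile height: 8 scanlines for X-tiling, 32 for Y-tiling on these parts. Worked through with assumed numbers (a Y-tiled object, stride = 4096 bytes, 300 KiB allocated):

    row_size = 4096 * 32                  = 131072 bytes (128 KiB)
    size     = (307200 / 131072) * 131072 = 262144 bytes (256 KiB)

so the fence register ends up covering two complete tile rows rather than the full, partially-tiled allocation.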
@@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
- /*
- * XXX: Contexts should only be initialized once. Doing a switch to the
- * default context switch however is something we'd like to do after
- * reset or thaw (the latter may not actually be necessary for HW, but
- * goes with our code better). Context switching requires rings (for
- * the do_switch), but before enabling PPGTT. So don't move this.
- */
- ret = i915_gem_context_enable(dev_priv);
+ ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
- DRM_ERROR("Context enable failed %d\n", ret);
+ DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
-
- return ret;
}
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable failed %d\n", ret);
+ DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
+
+ return ret;
}
return ret;
@@ -5155,7 +5155,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d0d3dfbe6d2a..b051a238baf9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -292,6 +292,23 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
+{
+ /*
+ * SNB and IVB hard hang, and VLV and CHV may hard hang, on a looping batchbuffer
+ * if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
+ return mask;
+}
+
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -304,8 +321,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
- ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
+ I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e2af1383b179..e7a16f119a29 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9815,7 +9815,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 25fdbb16d4e0..3b40a17b8852 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -794,6 +794,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_device *dev);
void gen6_enable_rps_interrupts(struct drm_device *dev);
void gen6_disable_rps_interrupts(struct drm_device *dev);
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..dfb783a8f2c3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
- if (panel->backlight.level == 0) {
+ if (panel->backlight.level <= panel->backlight.min) {
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 964b28e3c630..bf814a64582a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4363,16 +4363,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
- /* IVB and SNB hard hangs on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- */
- if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
- mask |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (IS_GEN8(dev_priv->dev))
- mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
- return ~mask;
+ return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
/* gen6_set_rps is called to update the frequency request, but should also be
@@ -4441,7 +4432,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
return;
/* Mask turbo interrupt so that they will not come in between */
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
vlv_force_gfx_clock(dev_priv, true);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dcde3798b45..64fdae558d36 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -6033,6 +6033,17 @@ void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+ WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* compute doesn't have PFP */
if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index dde5c7e29eb2..42cd0cffe210 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -903,6 +902,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
if (vm_id < 8) {
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
@@ -943,5 +945,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 1 << vm_id);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* reference */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 360de9f1f491..aea48c89b241 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2516,6 +2516,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 50f88611ff60..ce787a9f12c0 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -463,5 +462,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
+ radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0); /* value */
}
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2e12e4d69253..ad7125486894 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1133,6 +1133,23 @@
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
@@ -1272,6 +1289,13 @@
(1 << 21) | \
(((n) & 0xFFFFF) << 0))
+#define DMA_SRBM_POLL_PACKET ((9 << 28) | \
+ (1 << 27) | \
+ (1 << 26))
+
+#define DMA_SRBM_READ_PACKET ((9 << 28) | \
+ (1 << 27))
+
/* async DMA Packet types */
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_COPY 0x3
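These field encoders are what the cayman_vm_flush() hunk above is built from: after the PACKET3 header, the CP consumes a six-dword payload and polls a register (or a memory location, per MEM_SPACE) until (value & mask) compares against ref under the selected function, re-checking every poll-interval cycles. The invalidate wait restated with the control word spelled out (same values as the hunk above):

    u32 control = WAIT_REG_MEM_FUNCTION(0) |   /* always */
                  WAIT_REG_MEM_MEM_SPACE(0) |  /* register space */
                  WAIT_REG_MEM_ENGINE(0);      /* ME */

    radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
    radeon_ring_write(ring, control);
    radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); /* reg dword offset */
    radeon_ring_write(ring, 0);    /* address hi (unused for registers) */
    radeon_ring_write(ring, 0);    /* ref */
    radeon_ring_write(ring, 0);    /* mask */
    radeon_ring_write(ring, 0x20); /* poll interval */

Acting as a read-back of VM_INVALIDATE_REQUEST, this guarantees the preceding SRBM write has landed before the ring continues.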
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d540591..279801ca5110 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0);
}
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+ return addr;
+}
+
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+ uint64_t entry)
{
u32 *gtt = rdev->gart.ptr;
- gtt[i] = cpu_to_le32(lower_32_bits(addr));
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 064ad5569cca..08d68f3e13e9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = rdev->gart.ptr;
-
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24);
if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R300_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
addr |= R300_PTE_UNSNOOPED;
+ return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = rdev->gart.ptr;
+
/* on x86 we want this to be CPU endian; on powerpc
 * without HW swappers, it'll get swapped on the way
 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
- writel(addr, ((void __iomem *)ptr) + (i * 4));
+ writel(entry, ((void __iomem *)ptr) + (i * 4));
}
int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 54529b837afa..3f2a8d3febca 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page
*/
struct radeon_dummy_page {
+ uint64_t entry;
struct page *page;
dma_addr_t addr;
};
@@ -645,7 +646,7 @@ struct radeon_gart {
unsigned num_cpu_pages;
unsigned table_size;
struct page **pages;
- dma_addr_t *pages_addr;
+ uint64_t *pages_entry;
bool ready;
};
@@ -1847,8 +1848,9 @@ struct radeon_asic {
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
+ uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
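The gart callback is split in two here: get_page_entry() performs the ASIC-specific PTE encoding, set_page() only writes an already-encoded entry, and the per-page dma_addr_t cache (pages_addr) becomes a cache of encoded entries (pages_entry). Keeping the encoded form around is what enables the restore loop added to radeon_gart_table_vram_pin() further down: once the VRAM-backed table is re-pinned, every entry can be replayed verbatim. The bind path condensed from the radeon_gart.c hunk below:

    /* encode once, cache, and write only while the table is mapped */
    uint64_t entry = radeon_gart_get_page_entry(dma_addr, flags);

    rdev->gart.pages_entry[t] = entry;
    if (rdev->gart.ptr)
            radeon_gart_set_page(rdev, t, entry);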
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 850de57069be..ed0e10eee2dc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -333,6 +337,20 @@ static struct radeon_asic_ring r300_gfx_ring = {
.set_wptr = &r100_gfx_set_wptr,
};
+static struct radeon_asic_ring rv515_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &r100_gfx_get_rptr,
+ .get_wptr = &r100_gfx_get_wptr,
+ .set_wptr = &r100_gfx_set_wptr,
+};
+
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -345,6 +363,7 @@ static struct radeon_asic r300_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -411,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -477,6 +497,7 @@ static struct radeon_asic r420_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -543,6 +564,7 @@ static struct radeon_asic rs400_asic = {
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -609,6 +631,7 @@ static struct radeon_asic rs600_asic = {
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -677,6 +700,7 @@ static struct radeon_asic rs690_asic = {
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -745,10 +769,11 @@ static struct radeon_asic rv515_asic = {
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -811,10 +836,11 @@ static struct radeon_asic r520_asic = {
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -905,6 +931,7 @@ static struct radeon_asic r600_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -990,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1081,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1185,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1303,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1395,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1486,6 +1518,7 @@ static struct radeon_asic btc_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1621,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1724,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1857,6 +1892,7 @@ static struct radeon_asic si_asic = {
.get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2018,6 +2054,7 @@ static struct radeon_asic ci_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2125,6 +2162,7 @@ static struct radeon_asic kv_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2a45d548d5ec..8d787d115653 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ec65168f331..bd7519fdd3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
rdev->dummy_page.page = NULL;
return -ENOMEM;
}
+ rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+ RADEON_GART_PAGE_DUMMY);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..5450fa95a47e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
+
+ if (!r) {
+ int i;
+
+ /* We might have dropped some GART table updates while it wasn't
+ * mapped; restore all entries
+ */
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
+
return r;
}
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
- u64 page_base;
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
rdev->gart.pages[p] = NULL;
- rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
- page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base,
- RADEON_GART_PAGE_DUMMY);
+ radeon_gart_set_page(rdev, t,
+ rdev->dummy_page.entry);
}
- page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
{
unsigned t;
unsigned p;
- uint64_t page_base;
+ uint64_t page_base, page_entry;
int i, j;
if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
- if (rdev->gart.ptr) {
- page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base, flags);
- page_base += RADEON_GPU_PAGE_SIZE;
+ page_base = dma_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ page_entry = radeon_gart_get_page_entry(page_base, flags);
+ rdev->gart.pages_entry[t] = page_entry;
+ if (rdev->gart.ptr) {
+ radeon_gart_set_page(rdev, t, page_entry);
}
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
- rdev->gart.num_cpu_pages);
- if (rdev->gart.pages_addr == NULL) {
+ rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+ rdev->gart.num_gpu_pages);
+ if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
/* set GART entry to point to the dummy page by default */
- for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
- rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
- }
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
return 0;
}
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
- if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+ if (rdev->gart.ready) {
/* unbind pages */
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
vfree(rdev->gart.pages);
- vfree(rdev->gart.pages_addr);
+ vfree(rdev->gart.pages_entry);
rdev->gart.pages = NULL;
- rdev->gart.pages_addr = NULL;
+ rdev->gart.pages_entry = NULL;
radeon_dummy_page_fini(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a46f73737994..d0b4f7d1140d 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -576,7 +576,7 @@ error_unreserve:
error_free:
drm_free_large(vm_bos);
- if (r)
+ if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 8bf87f1203cc..bef9a0953284 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
- uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
lock_srbm(kgd, mec, pipe, 0, 0);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 32522cc940a1..f7da8fe96a66 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1287,8 +1287,39 @@ dpm_failed:
return ret;
}
+struct radeon_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+};
+
+/* cards with dpm stability problems */
+static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
+ /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
+ { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
+ /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
+ { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
+ { 0, 0, 0, 0 },
+};
+
int radeon_pm_init(struct radeon_device *rdev)
{
+ struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
+ bool disable_dpm = false;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ disable_dpm = true;
+ break;
+ }
+ ++p;
+ }
+
/* enable dpm on rv6xx+ */
switch (rdev->family) {
case CHIP_RV610:
@@ -1344,6 +1375,8 @@ int radeon_pm_init(struct radeon_device *rdev)
(!(rdev->flags & RADEON_IS_IGP)) &&
(!rdev->smc_fw))
rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (disable_dpm && (radeon_dpm == -1))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
else if (radeon_dpm == 0)
rdev->pm.pm_method = PM_METHOD_PROFILE;
else
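The quirk matches the full vendor/device/subsystem-vendor/subsystem-device tuple, so only the exact board builds from the linked reports fall back to the profile method. Since the fallback is gated on radeon_dpm == -1 (the auto default), an explicit request still wins; assuming the stock radeon dpm module parameter semantics (-1 = auto, 0 = disable, 1 = enable), dpm can be forced back on for a quirked card from the kernel command line:

    radeon.dpm=1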
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index cde48c42b30a..06d2246d07f1 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
uint64_t result;
/* page table offset */
- result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
+ result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+ result &= ~RADEON_GPU_PAGE_MASK;
return result;
}
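Masking the low bits now happens once, here: ~RADEON_GPU_PAGE_MASK clears the PTE flag bits out of the cached entry, which is why cik_sdma.c, ni_dma.c and si_dma.c each drop their local 'value &= 0xFFFFFFFFFFFFF000ULL;' in this series (the mask would otherwise be applied twice). Illustrated with an assumed rs600-style entry whose flags live in the low 12 bits:

    entry  = 0x0000001234567000 | flags        /* cached in pages_entry[] */
    result = entry & ~RADEON_GPU_PAGE_MASK;    /* = 0x0000001234567000 */

The lookup index also changes from CPU-page to GPU-page granularity, matching how pages_entry[] is now sized (num_gpu_pages) and filled.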
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c5799f16aa4b..34e3235f41d2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
uint32_t entry;
- u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
- entry = cpu_to_le32(entry);
- gtt[i] = entry;
+ return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ u32 *gtt = rdev->gart.ptr;
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9acb1c3c005b..74bce91aecc1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = (void *)rdev->gart.ptr;
-
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_SYSTEM;
if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R600_PTE_WRITEABLE;
if (flags & RADEON_GART_PAGE_SNOOP)
addr |= R600_PTE_SNOOPED;
- writeq(addr, ptr + (i * 8));
+ return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = (void *)rdev->gart.ptr;
+ writeq(entry, ptr + (i * 8));
}
int rs600_irq_set(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 60df444bd075..5d89b874a1a2 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5057,6 +5057,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index f5cc777e1c5f..83207929fc62 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -206,6 +205,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
+ radeon_ring_write(ring, 0xff << 16); /* retry */
+ radeon_ring_write(ring, 1 << vm_id); /* mask */
+ radeon_ring_write(ring, 0); /* value */
+ radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
/**
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 32e354b8b0ab..eff8a6444956 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2908,6 +2908,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
return ret;
}
+struct si_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 max_sclk;
+ u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { 0, 0, 0, 0 },
+};
+
static void si_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@@ -2918,7 +2934,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
u32 mclk, sclk;
u16 vddc, vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+ u32 max_sclk = 0, max_mclk = 0;
int i;
+ struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ max_sclk = p->max_sclk;
+ max_mclk = p->max_mclk;
+ break;
+ }
+ ++p;
+ }
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
@@ -2972,6 +3003,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (ps->performance_levels[i].mclk > max_mclk_vddc)
ps->performance_levels[i].mclk = max_mclk_vddc;
}
+ if (max_mclk) {
+ if (ps->performance_levels[i].mclk > max_mclk)
+ ps->performance_levels[i].mclk = max_mclk;
+ }
+ if (max_sclk) {
+ if (ps->performance_levels[i].sclk > max_sclk)
+ ps->performance_levels[i].sclk = max_sclk;
+ }
}
/* XXX validate the min clocks required for display */
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 4069be89e585..84999242c747 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1632,6 +1632,23 @@
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
#define PACKET3_CP_DMA 0x41
@@ -1835,6 +1852,7 @@
#define DMA_PACKET_TRAP 0x7
#define DMA_PACKET_SRBM_WRITE 0x9
#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
#define VCE_STATUS 0x20004
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b5d22110f25..6c6b655defcf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
} else if (unhide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
}
mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
- else if (hide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
+ else if (hide_svga)
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
- }
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
- mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
+ spin_lock_init(&dev_priv->hw_lock);
+ spin_lock_init(&dev_priv->waiter_lock);
+ spin_lock_init(&dev_priv->cap_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->enable_fb = enable_fbdev;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) {
ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
- mutex_unlock(&dev_priv->hw_mutex);
goto out_err0;
}
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem = dev_priv->vram_size;
ret = vmw_dma_masks(dev_priv);
- if (unlikely(ret != 0)) {
- mutex_unlock(&dev_priv->hw_mutex);
+ if (unlikely(ret != 0))
goto out_err0;
- }
/*
* Limit back buffer size to VRAM size. Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->prim_bb_mem > dev_priv->vram_size)
dev_priv->prim_bb_mem = dev_priv->vram_size;
- mutex_unlock(&dev_priv->hw_mutex);
-
vmw_print_capabilities(dev_priv->capabilities);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
- mutex_unlock(&dev_priv->hw_mutex);
}
if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
return ret;
}
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
- mutex_unlock(&dev_priv->hw_mutex);
/**
* Reclaim 3d reference held by fbdev and potentially
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
uint32_t memory_size;
bool has_gmr;
bool has_mob;
- struct mutex hw_mutex;
+ spinlock_t hw_lock;
+ spinlock_t cap_lock;
/*
* VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
- int fence_queue_waiters; /* Protected by hw_mutex */
- int goal_queue_waiters; /* Protected by hw_mutex */
+ spinlock_t waiter_lock;
+ int fence_queue_waiters; /* Protected by waiter_lock */
+ int goal_queue_waiters; /* Protected by waiter_lock */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
return (struct vmw_master *) master->driver_priv;
}
+/*
+ * The locking here is fine-grained: it is taken and released once for
+ * every read and every write. That is of course costly, but we don't
+ * perform much register access in the timing-critical paths anyway, and
+ * in exchange we can never forget to take the hw lock around a register
+ * access.
+ */
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- uint32_t val;
+ unsigned long irq_flags;
+ u32 val;
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
return val;
}
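With the spinlock folded into the accessors themselves, call sites no longer take a lock of their own; the lock/unlock pairs removed throughout this series simply disappear. A minimal caller sketch (the helper name is hypothetical; the accessor and register are from this patch):

static u32 example_num_displays(struct vmw_private *dev_priv)
{
	/* Previously: mutex_lock(&dev_priv->hw_mutex) around the read. */
	return vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
}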
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b7594cb758af..945f1e0dad92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work, ping_work;
+ struct work_struct work;
u32 user_fence_size;
u32 fence_size;
u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
return "svga";
}
-static void vmw_fence_ping_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, ping_work);
-
- vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
static bool vmw_fence_enable_signaling(struct fence *f)
{
struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
- if (mutex_trylock(&dev_priv->hw_mutex)) {
- vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
- mutex_unlock(&dev_priv->hw_mutex);
- } else
- schedule_work(&fman->ping_work);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
return true;
}
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->fence_list);
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
- INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
bool lists_empty;
(void) cancel_work_sync(&fman->work);
- (void) cancel_work_sync(&fman->ping_work);
spin_lock_irqsave(&fman->lock, irq_flags);
lists_empty = list_empty(&fman->fence_list) &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 09e10aefcd8e..39f2b03888e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!dev_priv->has_mob)
return false;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return (result != 0);
}
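The new cap_lock exists because SVGA_REG_DEV_CAP is an indexed pair: the write selects a capability and the following read returns its value, so two concurrent queries must not interleave even though each individual access is already serialised by hw_lock. A sketch of a hypothetical helper that would encapsulate the pair (not part of this patch):

static u32 vmw_query_cap(struct vmw_private *dev_priv, u32 cap)
{
	u32 result;

	spin_lock(&dev_priv->cap_lock);
	vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap);	/* select */
	result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);	/* fetch */
	spin_unlock(&dev_priv->cap_lock);

	return result;
}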
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
- mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
- mutex_unlock(&dev_priv->hw_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
return vmw_fifo_send_fence(dev_priv, &dummy);
}
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ static DEFINE_SPINLOCK(ping_lock);
+ unsigned long irq_flags;
+ /*
+ * The ping_lock is needed because we don't have an atomic
+ * test-and-set of the SVGA_FIFO_BUSY register.
+ */
+ spin_lock_irqsave(&ping_lock, irq_flags);
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
- mutex_lock(&dev_priv->hw_mutex);
-
- vmw_fifo_ping_host_locked(dev_priv, reason);
-
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock_irqrestore(&ping_lock, irq_flags);
}
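If SVGA_FIFO_BUSY were ordinary kernel memory rather than a shared FIFO page, the ping_lock could be replaced with an atomic bitop. A hypothetical sketch of that alternative, only to illustrate why the lock is the pragmatic choice here:

/* Hypothetical: the atomic bitops cannot be applied to the __iomem
 * FIFO page, which is exactly why the ping_lock above is needed. */
static void ping_host_if_idle(unsigned long *busy_word,
			      struct vmw_private *dev_priv, u32 reason)
{
	if (!test_and_set_bit(0, busy_word))
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}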
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
- mutex_unlock(&dev_priv->hw_mutex);
vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
if (interruptible)
ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
else if (likely(ret > 0))
ret = 0;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 37881ecf5d7a..69c8ce23123c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < max_size; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
compat_cap->pairs[i][0] = i;
compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return 0;
}
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
if (num > SVGA3D_DEVCAP_MAX)
num = SVGA3D_DEVCAP_MAX;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
} else if (gb_objects) {
ret = vmw_fill_compat_cap(dev_priv, bounce, size);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c423766c441..9fe9827ee499 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
- uint32_t busy;
- mutex_lock(&dev_priv->hw_mutex);
- busy = vmw_read(dev_priv, SVGA_REG_BUSY);
- mutex_unlock(&dev_priv->hw_mutex);
-
- return (busy == 0);
+ return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->fence_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->fence_queue_waiters == 0) {
unsigned long irq_flags;
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->goal_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->goal_queue_waiters == 0) {
unsigned long irq_flags;
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
- mutex_unlock(&dev_priv->hw_mutex);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3725b521d931..8725b79e7847 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_display_unit *du = vmw_connector_to_du(connector);
- mutex_lock(&dev_priv->hw_mutex);
num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
- mutex_unlock(&dev_priv->hw_mutex);
return ((vmw_connector_to_du(connector)->unit < num_displays &&
du->pref_active) ?
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6529c09c46f0..a7de26d1ac80 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON
for those channels specified in the map. This map can be provided
either via platform data or the device tree bindings.
+config SENSORS_I5500
+ tristate "Intel 5500/5520/X58 temperature sensor"
+ depends on X86 && PCI
+ help
+ If you say yes here you get support for the temperature
+ sensor inside the Intel 5500, 5520 and X58 chipsets.
+
+ This driver can also be built as a module. If so, the module
+ will be called i5500_temp.
+
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
depends on X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 67280643bcf0..6c941472e707 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_HTU21) += htu21.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
+obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
new file mode 100644
index 000000000000..3e3ccbf18b4e
--- /dev/null
+++ b/drivers/hwmon/i5500_temp.c
@@ -0,0 +1,149 @@
+/*
+ * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor
+ *
+ * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Register definitions from datasheet */
+#define REG_TSTHRCATA 0xE2
+#define REG_TSCTRL 0xE8
+#define REG_TSTHRRPEX 0xEB
+#define REG_TSTHRLO 0xEC
+#define REG_TSTHRHI 0xEE
+#define REG_CTHINT 0xF0
+#define REG_TSFSC 0xF3
+#define REG_CTSTS 0xF4
+#define REG_TSTHRRQPI 0xF5
+#define REG_CTCTRL 0xF7
+#define REG_TSTIMER 0xF8
+
+/*
+ * Sysfs stuff
+ */
+
+/* Sensor resolution: 0.5 degree C */
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ long temp;
+ u16 tsthrhi;
+ s8 tsfsc;
+
+ pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi);
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ temp = ((long)tsthrhi - tsfsc) * 500;
+
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_thresh(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int reg = to_sensor_dev_attr(devattr)->index;
+ long temp;
+ u16 tsthr;
+
+ pci_read_config_word(pdev, reg, &tsthr);
+ temp = tsthr * 500;
+
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int nr = to_sensor_dev_attr(devattr)->index;
+ u8 ctsts;
+
+ pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+ return sprintf(buf, "%u\n", !!(ctsts & (1 << nr))); /* report 0/1 */
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, REG_TSTHRCATA);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, REG_TSTHRLO);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, REG_TSTHRHI);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
+
+static struct attribute *i5500_temp_attrs[] = {
+ &dev_attr_temp1_input.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(i5500_temp);
+
+static const struct pci_device_id i5500_temp_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, i5500_temp_ids);
+
+static int i5500_temp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct device *hwmon_dev;
+ u32 tstimer;
+ s8 tsfsc;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable device\n");
+ return err;
+ }
+
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ pci_read_config_dword(pdev, REG_TSTIMER, &tstimer);
+ if (tsfsc == 0x7F && tstimer == 0x07D30D40) {
+ dev_notice(&pdev->dev, "Sensor seems to be disabled\n");
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "intel5500", NULL,
+ i5500_temp_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct pci_driver i5500_temp_driver = {
+ .name = "i5500_temp",
+ .id_table = i5500_temp_ids,
+ .probe = i5500_temp_probe,
+};
+
+module_pci_driver(i5500_temp_driver);
+
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
+MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver");
+MODULE_LICENSE("GPL");
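As a worked check of the conversion in show_temp() above, with hypothetical register values: the result is in millidegrees Celsius, matching the 0.5 degree C resolution noted earlier.

/* Standalone userspace check of the conversion (hypothetical values). */
#include <stdio.h>

int main(void)
{
	unsigned short tsthrhi = 105;	/* pretend REG_TSTHRHI read */
	signed char tsfsc = 10;		/* pretend REG_TSFSC read */
	long temp = ((long)tsthrhi - tsfsc) * 500;

	printf("%ld\n", temp);		/* 47500, i.e. 47.5 degrees C */
	return 0;
}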
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 31e8308ba899..ab838d9e28b6 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -881,6 +881,7 @@ config I2C_XLR
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
+ select I2C_SLAVE
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index bff20a589621..958c8db4ec30 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
int ret;
pm_runtime_get_sync(&adap->dev);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (retry = 0; retry < adap->retries; retry++) {
ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
if (ret != -EAGAIN) {
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return ret;
}
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
udelay(100);
}
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return -EREMOTEIO;
}
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
clk_prepare_enable(i2c->clk);
ret = s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
if (ret != 0) {
dev_err(&pdev->dev, "I2C controller init failed\n");
return ret;
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+ clk_unprepare(i2c->clk);
return ret;
}
}
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = s3c24xx_i2c_register_cpufreq(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
s3c24xx_i2c_deregister_cpufreq(i2c);
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ clk_unprepare(i2c->clk);
+
pm_runtime_disable(&i2c->adap.dev);
pm_runtime_disable(&pdev->dev);
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ int ret;
if (!IS_ERR(i2c->sysreg))
regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
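The pattern this patch converts to: clk_prepare() may sleep, so it is performed once at probe time and undone at remove, while the cheap clk_enable()/clk_disable() pair brackets each transfer. A condensed sketch under those assumptions (structure and function names hypothetical):

static int example_probe(struct example_i2c *i2c)
{
	return clk_prepare(i2c->clk);	/* once, in sleepable context */
}

static int example_xfer(struct example_i2c *i2c)
{
	int ret = clk_enable(i2c->clk);	/* cheap, per transfer */

	if (ret)
		return ret;
	/* ... perform the transfer ... */
	clk_disable(i2c->clk);
	return 0;
}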
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 440d5dbc8b5f..007818b3e174 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data {
int pos;
int sr;
bool send_stop;
+ bool stop_after_dma;
struct resource *res;
struct dma_chan *dma_tx;
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
if (pd->pos == pd->msg->len) {
/* Send stop if we haven't yet (DMA case) */
- if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+ if (pd->send_stop && pd->stop_after_dma)
i2c_op(pd, OP_TX_STOP, 0);
return 1;
}
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
real_pos = pd->pos - 2;
if (pd->pos == pd->msg->len) {
+ if (pd->stop_after_dma) {
+ /* Simulate PIO end condition after DMA transfer */
+ i2c_op(pd, OP_RX_STOP, 0);
+ pd->pos++;
+ break;
+ }
+
if (real_pos < 0) {
i2c_op(pd, OP_RX_STOP, 0);
break;
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data)
sh_mobile_i2c_dma_unmap(pd);
pd->pos = pd->msg->len;
+ pd->stop_after_dma = true;
iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
}
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
bool do_start = pd->send_stop || !i;
msg = &msgs[i];
pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
+ pd->stop_after_dma = false;
err = start_ch(pd, msg, do_start);
if (err)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 39d25a8cb1ad..e9eae57a2b50 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2972,6 +2972,7 @@ trace:
}
EXPORT_SYMBOL(i2c_smbus_xfer);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client)
return ret;
}
EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+#endif
MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C-Bus main module");
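For reference, IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is built-in or modular and to 0 otherwise, so the guard added above compiles the slave API out entirely when CONFIG_I2C_SLAVE is unset instead of leaving dead exports behind. A minimal sketch of the same idiom:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* Compiled for both CONFIG_I2C_SLAVE=y and =m, dropped for =n. */
int example_slave_hook(void)
{
	return 0;
}
#endif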
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 6631400b5f02..cf9b09db092f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
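A worked example of why the bounds check changes from >= to >: the old test rejected a legitimate access that ends exactly at the end of the EEPROM.

/* With attr->size == 256 and a full-length access off = 0, count = 256:
 * old: off + count >= attr->size  ->  256 >= 256  -> -EFBIG (wrong)
 * new: off + count >  attr->size  ->  256 >  256  -> allowed (right)
 */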
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 57ecc5b204f3..9117b7a2d5f8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1114,7 +1114,8 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
struct mlx4_dev *dev = to_mdev(qp->device)->dev;
int err = 0;
- if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+ dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
ib_flow = flow_attr + 1;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index f2b978026407..6e22682c8255 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
* Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
},
},
+ {
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+ },
+ },
#endif
{ }
};
@@ -1520,6 +1536,8 @@ static int elantech_set_properties(struct elantech_data *etd)
case 7:
case 8:
case 9:
+ case 10:
+ case 13:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f9472920d986..23e26e0768b5 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1232, 5710, 1156, 4696
},
{
- (const char * const []){"LEN0034", "LEN0036", "LEN0039",
- "LEN2002", "LEN2004", NULL},
+ (const char * const []){"LEN0034", "LEN0036", "LEN0037",
+ "LEN0039", "LEN2002", "LEN2004",
+ NULL},
1024, 5112, 2024, 4832
},
{
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
"LEN0035", /* X240 */
"LEN0036", /* T440 */
- "LEN0037",
+ "LEN0037", /* X1 Carbon 2nd */
"LEN0038",
"LEN0039", /* T440s */
"LEN0041",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index c66d1b53843e..c11556563ef0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Medion Akoya E7225 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ },
+ },
+ {
/* Blue FB5601 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "blue"),
@@ -415,6 +423,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Acer Aspire 7738 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ },
+ },
+ {
/* Gericom Bellagio */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
@@ -745,6 +760,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{ }
};
+/*
+ * Some laptops need a keyboard reset before the touchpad is probed, so
+ * that it gets detected, initialised and, finally, working.
+ */
+static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ {
+ /* Gigabyte P35 v2 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+ },
+ },
+ {
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ },
+ },
+ {
+ /* Gigabyte P34 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ },
+ },
+ { }
+};
+
#endif /* CONFIG_X86 */
#ifdef CONFIG_PNP
@@ -1040,6 +1084,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
+ if (dmi_check_system(i8042_dmi_kbdreset_table))
+ i8042_kbdreset = true;
+
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 924e4bf357fb..986a71c614b0 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -67,6 +67,10 @@ static bool i8042_notimeout;
module_param_named(notimeout, i8042_notimeout, bool, 0);
MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+static bool i8042_kbdreset;
+module_param_named(kbdreset, i8042_kbdreset, bool, 0);
+MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
+
#ifdef CONFIG_X86
static bool i8042_dritek;
module_param_named(dritek, i8042_dritek, bool, 0);
@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
return -1;
/*
+ * Reset keyboard (needed on some laptops to successfully detect
+ * touchpad, e.g., some Gigabyte laptop models with Elantech
+ * touchpads).
+ */
+ if (i8042_kbdreset) {
+ pr_warn("Attempting to reset device connected to KBD port\n");
+ i8042_kbd_write(NULL, (unsigned char) 0xff);
+ }
+
+/*
* Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
* used it for a PCI card or something else.
*/
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index db98eb706466..baa0d9786f50 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -14,6 +14,32 @@ menuconfig IOMMU_SUPPORT
if IOMMU_SUPPORT
+menu "Generic IOMMU Pagetable Support"
+
+# Selected by the actual pagetable implementations
+config IOMMU_IO_PGTABLE
+ bool
+
+config IOMMU_IO_PGTABLE_LPAE
+ bool "ARMv7/v8 Long Descriptor Format"
+ select IOMMU_IO_PGTABLE
+ help
+ Enable support for the ARM long descriptor pagetable format.
+ This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
+ sizes at both stage-1 and stage-2, as well as address spaces
+ up to 48 bits in size.
+
+config IOMMU_IO_PGTABLE_LPAE_SELFTEST
+ bool "LPAE selftests"
+ depends on IOMMU_IO_PGTABLE_LPAE
+ help
+ Enable self-tests for the LPAE page table allocator. This performs
+ a series of page-table consistency checks during boot.
+
+ If unsure, say N here.
+
+endmenu
+
config IOMMU_IOVA
bool
@@ -295,6 +321,7 @@ config IPMMU_VMSA
depends on ARM_LPAE
depends on ARCH_SHMOBILE || COMPILE_TEST
select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
help
Support for the Renesas VMSA-compatible IPMMU found in the
@@ -312,14 +339,13 @@ config SPAPR_TCE_IOMMU
config ARM_SMMU
bool "ARM Ltd. System MMU (SMMU) Support"
- depends on ARM64 || (ARM_LPAE && OF)
- depends on MMU
+ depends on (ARM64 || ARM) && MMU
select IOMMU_API
+ select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
help
Support for implementations of the ARM System MMU architecture
- versions 1 and 2. The driver supports both v7l and v8l table
- formats with 4k and 64k page sizes.
+ versions 1 and 2.
Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture.
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 0b1b94ef13ab..080ffab4ed1c 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,8 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6b6dc72e3f66..8d1fb7f18bc5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -843,10 +843,10 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
size_t size, u16 domid, int pde)
{
u64 pages;
- int s;
+ bool s;
pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = 0;
+ s = false;
if (pages > 1) {
/*
@@ -854,7 +854,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
* TLB entries for this domain
*/
address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = 1;
+ s = true;
}
address &= PAGE_MASK;
@@ -874,10 +874,10 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
u64 address, size_t size)
{
u64 pages;
- int s;
+ bool s;
pages = iommu_num_pages(address, size, PAGE_SIZE);
- s = 0;
+ s = false;
if (pages > 1) {
/*
@@ -885,7 +885,7 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
* TLB entries for this domain
*/
address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
- s = 1;
+ s = true;
}
address &= PAGE_MASK;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d3a3caf82271..6d5a5c44453b 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -151,18 +151,6 @@ static void put_device_state(struct device_state *dev_state)
wake_up(&dev_state->wq);
}
-static void put_device_state_wait(struct device_state *dev_state)
-{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
- if (!atomic_dec_and_test(&dev_state->count))
- schedule();
- finish_wait(&dev_state->wq, &wait);
-
- free_device_state(dev_state);
-}
-
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
int pasid, bool alloc)
@@ -278,14 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
-
- if (!atomic_dec_and_test(&pasid_state->count))
- schedule();
-
- finish_wait(&pasid_state->wq, &wait);
+ wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
free_pasid_state(pasid_state);
}
@@ -851,7 +832,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)
/* Get rid of any remaining pasid states */
free_pasid_states(dev_state);
- put_device_state_wait(dev_state);
+ put_device_state(dev_state);
+ /*
+ * Wait until the last reference is dropped before freeing
+ * the device state.
+ */
+ wait_event(dev_state->wq, !atomic_read(&dev_state->count));
+ free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
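wait_event() re-tests its condition around every sleep, which the open-coded sequence it replaces did only once, so a spurious or early wakeup could previously return before the refcount reached zero. Roughly, the macro expands to a loop like this (simplified sketch, not the exact kernel expansion):

DEFINE_WAIT(wait);

for (;;) {
	prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!atomic_read(&dev_state->count))	/* condition re-checked */
		break;
	schedule();
}
finish_wait(&dev_state->wq, &wait);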
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6cd47b75286f..fc13dd56953e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -23,8 +23,6 @@
* - Stream-matching and stream-indexing
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
- * - 4k and 64k pages, with contiguous pte hints.
- * - Up to 48-bit addressing (dependent on VA_BITS)
* - Context fault reporting
*/
@@ -36,7 +34,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
-#include <linux/mm.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
@@ -46,7 +44,7 @@
#include <linux/amba/bus.h>
-#include <asm/pgalloc.h>
+#include "io-pgtable.h"
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
@@ -71,40 +69,6 @@
((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
? 0x400 : 0))
-/* Page table bits */
-#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53)
-#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
-#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10)
-#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8)
-#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8)
-#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8)
-#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
-
-#if PAGE_SIZE == SZ_4K
-#define ARM_SMMU_PTE_CONT_ENTRIES 16
-#elif PAGE_SIZE == SZ_64K
-#define ARM_SMMU_PTE_CONT_ENTRIES 32
-#else
-#define ARM_SMMU_PTE_CONT_ENTRIES 1
-#endif
-
-#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
-#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
-
-/* Stage-1 PTE */
-#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
-#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6)
-#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2
-#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11)
-
-/* Stage-2 PTE */
-#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6)
-#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6)
-#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6)
-#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2)
-#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2)
-#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2)
-
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
@@ -132,17 +96,12 @@
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
-#define ARM_SMMU_GR0_PIDR0 0xfe0
-#define ARM_SMMU_GR0_PIDR1 0xfe4
-#define ARM_SMMU_GR0_PIDR2 0xfe8
#define ID0_S1TS (1 << 30)
#define ID0_S2TS (1 << 29)
#define ID0_NTS (1 << 28)
#define ID0_SMS (1 << 27)
-#define ID0_PTFS_SHIFT 24
-#define ID0_PTFS_MASK 0x2
-#define ID0_PTFS_V8_ONLY 0x2
+#define ID0_ATOSNS (1 << 26)
#define ID0_CTTW (1 << 14)
#define ID0_NUMIRPT_SHIFT 16
#define ID0_NUMIRPT_MASK 0xff
@@ -169,11 +128,7 @@
#define ID2_PTFS_16K (1 << 13)
#define ID2_PTFS_64K (1 << 14)
-#define PIDR2_ARCH_SHIFT 4
-#define PIDR2_ARCH_MASK 0xf
-
/* Global TLB invalidation */
-#define ARM_SMMU_GR0_STLBIALL 0x60
#define ARM_SMMU_GR0_TLBIVMID 0x64
#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
#define ARM_SMMU_GR0_TLBIALLH 0x6c
@@ -231,13 +186,25 @@
#define ARM_SMMU_CB_TTBCR2 0x10
#define ARM_SMMU_CB_TTBR0_LO 0x20
#define ARM_SMMU_CB_TTBR0_HI 0x24
+#define ARM_SMMU_CB_TTBR1_LO 0x28
+#define ARM_SMMU_CB_TTBR1_HI 0x2c
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_S1_MAIR0 0x38
+#define ARM_SMMU_CB_S1_MAIR1 0x3c
+#define ARM_SMMU_CB_PAR_LO 0x50
+#define ARM_SMMU_CB_PAR_HI 0x54
#define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_CB_FAR_LO 0x60
#define ARM_SMMU_CB_FAR_HI 0x64
#define ARM_SMMU_CB_FSYNR0 0x68
+#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
+#define ARM_SMMU_CB_S1_TLBIVAL 0x620
+#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
+#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
+#define ARM_SMMU_CB_ATS1PR_LO 0x800
+#define ARM_SMMU_CB_ATS1PR_HI 0x804
+#define ARM_SMMU_CB_ATSR 0x8f0
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
@@ -249,47 +216,16 @@
#define SCTLR_M (1 << 0)
#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
-#define RESUME_RETRY (0 << 0)
-#define RESUME_TERMINATE (1 << 0)
-
-#define TTBCR_EAE (1 << 31)
+#define CB_PAR_F (1 << 0)
-#define TTBCR_PASIZE_SHIFT 16
-#define TTBCR_PASIZE_MASK 0x7
+#define ATSR_ACTIVE (1 << 0)
-#define TTBCR_TG0_4K (0 << 14)
-#define TTBCR_TG0_64K (1 << 14)
-
-#define TTBCR_SH0_SHIFT 12
-#define TTBCR_SH0_MASK 0x3
-#define TTBCR_SH_NS 0
-#define TTBCR_SH_OS 2
-#define TTBCR_SH_IS 3
-
-#define TTBCR_ORGN0_SHIFT 10
-#define TTBCR_IRGN0_SHIFT 8
-#define TTBCR_RGN_MASK 0x3
-#define TTBCR_RGN_NC 0
-#define TTBCR_RGN_WBWA 1
-#define TTBCR_RGN_WT 2
-#define TTBCR_RGN_WB 3
-
-#define TTBCR_SL0_SHIFT 6
-#define TTBCR_SL0_MASK 0x3
-#define TTBCR_SL0_LVL_2 0
-#define TTBCR_SL0_LVL_1 1
-
-#define TTBCR_T1SZ_SHIFT 16
-#define TTBCR_T0SZ_SHIFT 0
-#define TTBCR_SZ_MASK 0xf
+#define RESUME_RETRY (0 << 0)
+#define RESUME_TERMINATE (1 << 0)
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_MASK 0x7
-#define TTBCR2_PASIZE_SHIFT 0
-#define TTBCR2_PASIZE_MASK 0x7
-
-/* Common definitions for PASize and SEP fields */
#define TTBCR2_ADDR_32 0
#define TTBCR2_ADDR_36 1
#define TTBCR2_ADDR_40 2
@@ -297,16 +233,7 @@
#define TTBCR2_ADDR_44 4
#define TTBCR2_ADDR_48 5
-#define TTBRn_HI_ASID_SHIFT 16
-
-#define MAIR_ATTR_SHIFT(n) ((n) << 3)
-#define MAIR_ATTR_MASK 0xff
-#define MAIR_ATTR_DEVICE 0x04
-#define MAIR_ATTR_NC 0x44
-#define MAIR_ATTR_WBRWA 0xff
-#define MAIR_ATTR_IDX_NC 0
-#define MAIR_ATTR_IDX_CACHE 1
-#define MAIR_ATTR_IDX_DEV 2
+#define TTBRn_HI_ASID_SHIFT 16
#define FSR_MULTI (1 << 31)
#define FSR_SS (1 << 30)
@@ -366,6 +293,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
+#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
u32 features;
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -380,10 +308,9 @@ struct arm_smmu_device {
u32 num_mapping_groups;
DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
- unsigned long s1_input_size;
- unsigned long s1_output_size;
- unsigned long s2_input_size;
- unsigned long s2_output_size;
+ unsigned long va_size;
+ unsigned long ipa_size;
+ unsigned long pa_size;
u32 num_global_irqs;
u32 num_context_irqs;
@@ -397,7 +324,6 @@ struct arm_smmu_cfg {
u8 cbndx;
u8 irptndx;
u32 cbar;
- pgd_t *pgd;
};
#define INVALID_IRPTNDX 0xff
@@ -412,11 +338,15 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
+ struct io_pgtable_ops *pgtbl_ops;
+ spinlock_t pgtbl_lock;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
- spinlock_t lock;
+ struct mutex init_mutex; /* Protects smmu pointer */
};
+static struct iommu_ops arm_smmu_ops;
+
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
@@ -597,7 +527,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
}
/* Wait for any pending TLB invalidations to complete */
-static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
int count = 0;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
@@ -615,12 +545,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
}
}
-static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_tlb_sync(void *cookie)
{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ __arm_smmu_tlb_sync(smmu_domain->smmu);
+}
+
+static void arm_smmu_tlb_inv_context(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *base = ARM_SMMU_GR0(smmu);
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ void __iomem *base;
if (stage1) {
base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -632,9 +569,76 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
base + ARM_SMMU_GR0_TLBIVMID);
}
- arm_smmu_tlb_sync(smmu);
+ __arm_smmu_tlb_sync(smmu);
+}
+
+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+ bool leaf, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
+ void __iomem *reg;
+
+ if (stage1) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+ if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
+ iova &= ~0xfffUL; /* mask VA[11:0]; the low bits carry the ASID */
+ iova |= ARM_SMMU_CB_ASID(cfg);
+ writel_relaxed(iova, reg);
+#ifdef CONFIG_64BIT
+ } else {
+ iova >>= 12;
+ iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
+ writeq_relaxed(iova, reg);
+#endif
+ }
+#ifdef CONFIG_64BIT
+ } else if (smmu->version == ARM_SMMU_V2) {
+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
+ ARM_SMMU_CB_S2_TLBIIPAS2;
+ writeq_relaxed(iova >> 12, reg);
+#endif
+ } else {
+ reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
+ writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
+ }
+}
+
+static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+ /* Ensure new page tables are visible to the hardware walker */
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+ dsb(ishst);
+ } else {
+ /*
+ * If the SMMU can't walk tables in the CPU caches, treat them
+ * like non-coherent DMA since we need to flush the new entries
+ * all the way out to memory. There's no possibility of
+ * recursion here as the SMMU table walker will not be wired
+ * through another SMMU.
+ */
+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+ DMA_TO_DEVICE);
+ }
}
+static struct iommu_gather_ops arm_smmu_gather_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context,
+ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
+ .tlb_sync = arm_smmu_tlb_sync,
+ .flush_pgtable = arm_smmu_flush_pgtable,
+};
+
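These callbacks hand TLB maintenance over to the shared io-pgtable code: the allocator queues an invalidation for each range it unmaps and syncs before returning. A paraphrased sketch of the expected call flow (illustrative only, not the framework's actual code):

static void example_unmap_flow(struct io_pgtable_cfg *cfg, void *cookie,
			       unsigned long iova, size_t size)
{
	cfg->tlb->tlb_add_flush(iova, size, true, cookie); /* queue inval */
	cfg->tlb->tlb_sync(cookie);			   /* drain */
}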
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
int flags, ret;
@@ -712,29 +716,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
return IRQ_HANDLED;
}
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
- size_t size)
-{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-
- /* Ensure new page tables are visible to the hardware walker */
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
- dsb(ishst);
- } else {
- /*
- * If the SMMU can't walk tables in the CPU caches, treat them
- * like non-coherent DMA since we need to flush the new entries
- * all the way out to memory. There's no possibility of
- * recursion here as the SMMU table walker will not be wired
- * through another SMMU.
- */
- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
- }
-}
-
-static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg)
{
u32 reg;
bool stage1;
@@ -771,124 +754,68 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
#else
reg = CBA2R_RW64_32BIT;
#endif
- writel_relaxed(reg,
- gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
-
- /* TTBCR2 */
- switch (smmu->s1_input_size) {
- case 32:
- reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
- break;
- case 36:
- reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
- break;
- case 39:
- case 40:
- reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
- break;
- case 42:
- reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
- break;
- case 44:
- reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
- break;
- case 48:
- reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
- break;
- }
-
- switch (smmu->s1_output_size) {
- case 32:
- reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
- break;
- case 36:
- reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
- break;
- case 39:
- case 40:
- reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
- break;
- case 42:
- reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
- break;
- case 44:
- reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
- break;
- case 48:
- reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
- break;
- }
-
- if (stage1)
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
}
- /* TTBR0 */
- arm_smmu_flush_pgtable(smmu, cfg->pgd,
- PTRS_PER_PGD * sizeof(pgd_t));
- reg = __pa(cfg->pgd);
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
- reg = (phys_addr_t)__pa(cfg->pgd) >> 32;
- if (stage1)
+ /* TTBRs */
+ if (stage1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-
- /*
- * TTBCR
- * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
- */
- if (smmu->version > ARM_SMMU_V1) {
- if (PAGE_SIZE == SZ_4K)
- reg = TTBCR_TG0_4K;
- else
- reg = TTBCR_TG0_64K;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
- if (!stage1) {
- reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT;
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+ } else {
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+ }
- switch (smmu->s2_output_size) {
+ /* TTBCR */
+ if (stage1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ if (smmu->version > ARM_SMMU_V1) {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+ switch (smmu->va_size) {
case 32:
- reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
break;
case 36:
- reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
break;
case 40:
- reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
break;
case 42:
- reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
break;
case 44:
- reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
break;
case 48:
- reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
+ reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
break;
}
- } else {
- reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
}
} else {
- reg = 0;
+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
}
- reg |= TTBCR_EAE |
- (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
- (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT);
-
- if (!stage1)
- reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
-
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
-
- /* MAIR0 (stage-1 only) */
+ /* MAIRs (stage-1 only) */
if (stage1) {
- reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
- (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
- (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
}
/* SCTLR */
@@ -905,11 +832,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct arm_smmu_device *smmu)
{
int irq, start, ret = 0;
- unsigned long flags;
+ unsigned long ias, oas;
+ struct io_pgtable_ops *pgtbl_ops;
+ struct io_pgtable_cfg pgtbl_cfg;
+ enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- spin_lock_irqsave(&smmu_domain->lock, flags);
+ mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
goto out_unlock;
@@ -940,6 +870,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
case ARM_SMMU_DOMAIN_S1:
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
start = smmu->num_s2_context_banks;
+ ias = smmu->va_size;
+ oas = smmu->ipa_size;
+ if (IS_ENABLED(CONFIG_64BIT))
+ fmt = ARM_64_LPAE_S1;
+ else
+ fmt = ARM_32_LPAE_S1;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -949,6 +885,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
case ARM_SMMU_DOMAIN_S2:
cfg->cbar = CBAR_TYPE_S2_TRANS;
start = 0;
+ ias = smmu->ipa_size;
+ oas = smmu->pa_size;
+ if (IS_ENABLED(CONFIG_64BIT))
+ fmt = ARM_64_LPAE_S2;
+ else
+ fmt = ARM_32_LPAE_S2;
break;
default:
ret = -EINVAL;
@@ -968,10 +910,30 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = cfg->cbndx;
}
- ACCESS_ONCE(smmu_domain->smmu) = smmu;
- arm_smmu_init_context_bank(smmu_domain);
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
+ pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
+ .ias = ias,
+ .oas = oas,
+ .tlb = &arm_smmu_gather_ops,
+ };
+
+ smmu_domain->smmu = smmu;
+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+ if (!pgtbl_ops) {
+ ret = -ENOMEM;
+ goto out_clear_smmu;
+ }
+
+ /* Update our support page sizes to reflect the page table format */
+ arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ /* Initialise the context bank with our page table cfg */
+ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
+
+ /*
+ * Request context fault interrupt. Do this last to avoid the
+ * handler seeing a half-initialised domain state.
+ */
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
"arm-smmu-context-fault", domain);
@@ -981,10 +943,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = INVALID_IRPTNDX;
}
+ mutex_unlock(&smmu_domain->init_mutex);
+
+ /* Publish page table ops for map/unmap */
+ smmu_domain->pgtbl_ops = pgtbl_ops;
return 0;
+out_clear_smmu:
+ smmu_domain->smmu = NULL;
out_unlock:
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
+ mutex_unlock(&smmu_domain->init_mutex);
return ret;
}
@@ -999,23 +967,27 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
if (!smmu)
return;
- /* Disable the context bank and nuke the TLB before freeing it. */
+ /*
+ * Disable the context bank and free the page tables before freeing
+ * it.
+ */
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
- arm_smmu_tlb_inv_context(smmu_domain);
if (cfg->irptndx != INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
free_irq(irq, domain);
}
+ if (smmu_domain->pgtbl_ops)
+ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static int arm_smmu_domain_init(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain;
- pgd_t *pgd;
/*
* Allocate the domain and initialise some of its data structures.
@@ -1026,81 +998,10 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
if (!smmu_domain)
return -ENOMEM;
- pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL);
- if (!pgd)
- goto out_free_domain;
- smmu_domain->cfg.pgd = pgd;
-
- spin_lock_init(&smmu_domain->lock);
+ mutex_init(&smmu_domain->init_mutex);
+ spin_lock_init(&smmu_domain->pgtbl_lock);
domain->priv = smmu_domain;
return 0;
-
-out_free_domain:
- kfree(smmu_domain);
- return -ENOMEM;
-}
-
-static void arm_smmu_free_ptes(pmd_t *pmd)
-{
- pgtable_t table = pmd_pgtable(*pmd);
-
- __free_page(table);
-}
-
-static void arm_smmu_free_pmds(pud_t *pud)
-{
- int i;
- pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
-
- pmd = pmd_base;
- for (i = 0; i < PTRS_PER_PMD; ++i) {
- if (pmd_none(*pmd))
- continue;
-
- arm_smmu_free_ptes(pmd);
- pmd++;
- }
-
- pmd_free(NULL, pmd_base);
-}
-
-static void arm_smmu_free_puds(pgd_t *pgd)
-{
- int i;
- pud_t *pud, *pud_base = pud_offset(pgd, 0);
-
- pud = pud_base;
- for (i = 0; i < PTRS_PER_PUD; ++i) {
- if (pud_none(*pud))
- continue;
-
- arm_smmu_free_pmds(pud);
- pud++;
- }
-
- pud_free(NULL, pud_base);
-}
-
-static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
-{
- int i;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- pgd_t *pgd, *pgd_base = cfg->pgd;
-
- /*
- * Recursively free the page tables for this domain. We don't
- * care about speculative TLB filling because the tables should
- * not be active in any context bank at this point (SCTLR.M is 0).
- */
- pgd = pgd_base;
- for (i = 0; i < PTRS_PER_PGD; ++i) {
- if (pgd_none(*pgd))
- continue;
- arm_smmu_free_puds(pgd);
- pgd++;
- }
-
- kfree(pgd_base);
}
static void arm_smmu_domain_destroy(struct iommu_domain *domain)
@@ -1112,7 +1013,6 @@ static void arm_smmu_domain_destroy(struct iommu_domain *domain)
* already been detached.
*/
arm_smmu_destroy_domain_context(domain);
- arm_smmu_free_pgtables(smmu_domain);
kfree(smmu_domain);
}
@@ -1244,7 +1144,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_device *smmu, *dom_smmu;
+ struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
smmu = find_smmu_for_device(dev);
@@ -1258,21 +1158,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EEXIST;
}
+ /* Ensure that the domain is finalised */
+ ret = arm_smmu_init_domain_context(domain, smmu);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
/*
* Sanity check the domain. We don't support domains across
* different SMMUs.
*/
- dom_smmu = ACCESS_ONCE(smmu_domain->smmu);
- if (!dom_smmu) {
- /* Now that we have a master, we can finalise the domain */
- ret = arm_smmu_init_domain_context(domain, smmu);
- if (IS_ERR_VALUE(ret))
- return ret;
-
- dom_smmu = smmu_domain->smmu;
- }
-
- if (dom_smmu != smmu) {
+ if (smmu_domain->smmu != smmu) {
dev_err(dev,
"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
@@ -1303,293 +1198,103 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_domain_remove_master(smmu_domain, cfg);
}
-static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
- unsigned long end)
-{
- return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
- (addr + ARM_SMMU_PTE_CONT_SIZE <= end);
-}
-
-static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- unsigned long pfn, int prot, int stage)
-{
- pte_t *pte, *start;
- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
-
- if (pmd_none(*pmd)) {
- /* Allocate a new set of tables */
- pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
-
- if (!table)
- return -ENOMEM;
-
- arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
- pmd_populate(NULL, pmd, table);
- arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
- }
-
- if (stage == 1) {
- pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
- pteval |= ARM_SMMU_PTE_AP_RDONLY;
-
- if (prot & IOMMU_CACHE)
- pteval |= (MAIR_ATTR_IDX_CACHE <<
- ARM_SMMU_PTE_ATTRINDX_SHIFT);
- } else {
- pteval |= ARM_SMMU_PTE_HAP_FAULT;
- if (prot & IOMMU_READ)
- pteval |= ARM_SMMU_PTE_HAP_READ;
- if (prot & IOMMU_WRITE)
- pteval |= ARM_SMMU_PTE_HAP_WRITE;
- if (prot & IOMMU_CACHE)
- pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
- else
- pteval |= ARM_SMMU_PTE_MEMATTR_NC;
- }
-
- if (prot & IOMMU_NOEXEC)
- pteval |= ARM_SMMU_PTE_XN;
-
- /* If no access, create a faulting entry to avoid TLB fills */
- if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
- pteval &= ~ARM_SMMU_PTE_PAGE;
-
- pteval |= ARM_SMMU_PTE_SH_IS;
- start = pmd_page_vaddr(*pmd) + pte_index(addr);
- pte = start;
-
- /*
- * Install the page table entries. This is fairly complicated
- * since we attempt to make use of the contiguous hint in the
- * ptes where possible. The contiguous hint indicates a series
- * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
- * contiguous region with the following constraints:
- *
- * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
- * - Each pte in the region has the contiguous hint bit set
- *
- * This complicates unmapping (also handled by this code, when
- * neither IOMMU_READ or IOMMU_WRITE are set) because it is
- * possible, yet highly unlikely, that a client may unmap only
- * part of a contiguous range. This requires clearing of the
- * contiguous hint bits in the range before installing the new
- * faulting entries.
- *
- * Note that re-mapping an address range without first unmapping
- * it is not supported, so TLB invalidation is not required here
- * and is instead performed at unmap and domain-init time.
- */
- do {
- int i = 1;
-
- pteval &= ~ARM_SMMU_PTE_CONT;
-
- if (arm_smmu_pte_is_contiguous_range(addr, end)) {
- i = ARM_SMMU_PTE_CONT_ENTRIES;
- pteval |= ARM_SMMU_PTE_CONT;
- } else if (pte_val(*pte) &
- (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
- int j;
- pte_t *cont_start;
- unsigned long idx = pte_index(addr);
-
- idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
- cont_start = pmd_page_vaddr(*pmd) + idx;
- for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
- pte_val(*(cont_start + j)) &=
- ~ARM_SMMU_PTE_CONT;
-
- arm_smmu_flush_pgtable(smmu, cont_start,
- sizeof(*pte) *
- ARM_SMMU_PTE_CONT_ENTRIES);
- }
-
- do {
- *pte = pfn_pte(pfn, __pgprot(pteval));
- } while (pte++, pfn++, addr += PAGE_SIZE, --i);
- } while (addr != end);
-
- arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
- return 0;
-}
-
-static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
- unsigned long addr, unsigned long end,
- phys_addr_t phys, int prot, int stage)
+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
{
int ret;
- pmd_t *pmd;
- unsigned long next, pfn = __phys_to_pfn(phys);
-
-#ifndef __PAGETABLE_PMD_FOLDED
- if (pud_none(*pud)) {
- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pmd)
- return -ENOMEM;
-
- arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
- pud_populate(NULL, pud, pmd);
- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
-
- pmd += pmd_index(addr);
- } else
-#endif
- pmd = pmd_offset(pud, addr);
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- do {
- next = pmd_addr_end(addr, end);
- ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
- prot, stage);
- phys += next - addr;
- pfn = __phys_to_pfn(phys);
- } while (pmd++, addr = next, addr < end);
+ if (!ops)
+ return -ENODEV;
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->map(ops, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
return ret;
}
-static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
- unsigned long addr, unsigned long end,
- phys_addr_t phys, int prot, int stage)
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
{
- int ret = 0;
- pud_t *pud;
- unsigned long next;
-
-#ifndef __PAGETABLE_PUD_FOLDED
- if (pgd_none(*pgd)) {
- pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pud)
- return -ENOMEM;
-
- arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
- pgd_populate(NULL, pgd, pud);
- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
-
- pud += pud_index(addr);
- } else
-#endif
- pud = pud_offset(pgd, addr);
+ size_t ret;
+ unsigned long flags;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- do {
- next = pud_addr_end(addr, end);
- ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
- prot, stage);
- phys += next - addr;
- } while (pud++, addr = next, addr < end);
+ if (!ops)
+ return 0;
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ ret = ops->unmap(ops, iova, size);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
return ret;
}
-static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int prot)
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+ dma_addr_t iova)
{
- int ret, stage;
- unsigned long end;
- phys_addr_t input_mask, output_mask;
+ struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- pgd_t *pgd = cfg->pgd;
- unsigned long flags;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ struct device *dev = smmu->dev;
+ void __iomem *cb_base;
+ u32 tmp;
+ u64 phys;
- if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
- stage = 2;
- input_mask = (1ULL << smmu->s2_input_size) - 1;
- output_mask = (1ULL << smmu->s2_output_size) - 1;
+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
+ if (smmu->version == 1) {
+ u32 reg = iova & ~0xfff;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
} else {
- stage = 1;
- input_mask = (1ULL << smmu->s1_input_size) - 1;
- output_mask = (1ULL << smmu->s1_output_size) - 1;
+ u32 reg = iova & ~0xfff;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
+ reg = ((u64)iova & ~0xfff) >> 32;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
}
- if (!pgd)
- return -EINVAL;
-
- if (size & ~PAGE_MASK)
- return -EINVAL;
-
- if ((phys_addr_t)iova & ~input_mask)
- return -ERANGE;
-
- if (paddr & ~output_mask)
- return -ERANGE;
-
- spin_lock_irqsave(&smmu_domain->lock, flags);
- pgd += pgd_index(iova);
- end = iova + size;
- do {
- unsigned long next = pgd_addr_end(iova, end);
-
- ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
- prot, stage);
- if (ret)
- goto out_unlock;
-
- paddr += next - iova;
- iova = next;
- } while (pgd++, iova != end);
-
-out_unlock:
- spin_unlock_irqrestore(&smmu_domain->lock, flags);
-
- return ret;
-}
-
-static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- struct arm_smmu_domain *smmu_domain = domain->priv;
-
- if (!smmu_domain)
- return -ENODEV;
+ if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
+ !(tmp & ATSR_ACTIVE), 5, 50)) {
+ dev_err(dev,
+ "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
+ &iova);
+ return ops->iova_to_phys(ops, iova);
+ }
- return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
-}
+ phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
+ phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
-static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size)
-{
- int ret;
- struct arm_smmu_domain *smmu_domain = domain->priv;
+ if (phys & CB_PAR_F) {
+ dev_err(dev, "translation fault!\n");
+ dev_err(dev, "PAR = 0x%llx\n", phys);
+ return 0;
+ }
- ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
- arm_smmu_tlb_inv_context(smmu_domain);
- return ret ? 0 : size;
+ return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
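The hardware path above ends by stitching the physical address back together from the two halves of the PAR register. A minimal standalone sketch of that final decode, assuming (as the driver's definitions suggest) that the fault flag CB_PAR_F is PAR bit 0; the register and iova values are invented:

    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
    #define CB_PAR_F (1 << 0)               /* assumed: fault flag in PAR[0] */

    int main(void)
    {
        uint32_t par_lo = 0x87654000, par_hi = 0x23; /* hypothetical registers */
        uint64_t iova = 0x10005abcULL;
        uint64_t phys = par_lo | ((uint64_t)par_hi << 32);

        if (phys & CB_PAR_F) {              /* translation faulted */
            printf("translation fault, PAR = 0x%llx\n",
                   (unsigned long long)phys);
            return 1;
        }

        /* Keep PA[39:12] from PAR, page offset from the original iova */
        phys = (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
        printf("phys = 0x%llx\n", (unsigned long long)phys); /* 0x2387654abc */
        return 0;
    }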
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+ dma_addr_t iova)
{
- pgd_t *pgdp, pgd;
- pud_t pud;
- pmd_t pmd;
- pte_t pte;
+ phys_addr_t ret;
+ unsigned long flags;
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
- pgdp = cfg->pgd;
- if (!pgdp)
+ if (!ops)
return 0;
- pgd = *(pgdp + pgd_index(iova));
- if (pgd_none(pgd))
- return 0;
-
- pud = *pud_offset(&pgd, iova);
- if (pud_none(pud))
- return 0;
-
- pmd = *pmd_offset(&pud, iova);
- if (pmd_none(pmd))
- return 0;
+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
+ ret = arm_smmu_iova_to_phys_hard(domain, iova);
+ else
+ ret = ops->iova_to_phys(ops, iova);
+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
- pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
- if (pte_none(pte))
- return 0;
-
- return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
+ return ret;
}
static bool arm_smmu_capable(enum iommu_cap cap)
@@ -1698,24 +1403,34 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
+ int ret = 0;
struct arm_smmu_domain *smmu_domain = domain->priv;
+ mutex_lock(&smmu_domain->init_mutex);
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu)
- return -EPERM;
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
if (*(int *)data)
smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
- return 0;
+ break;
default:
- return -ENODEV;
+ ret = -ENODEV;
}
+
+out_unlock:
+ mutex_unlock(&smmu_domain->init_mutex);
+ return ret;
}
-static const struct iommu_ops arm_smmu_ops = {
+static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_init = arm_smmu_domain_init,
.domain_destroy = arm_smmu_domain_destroy,
@@ -1729,9 +1444,7 @@ static const struct iommu_ops arm_smmu_ops = {
.remove_device = arm_smmu_remove_device,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
- .pgsize_bitmap = (SECTION_SIZE |
- ARM_SMMU_PTE_CONT_SIZE |
- PAGE_SIZE),
+ .pgsize_bitmap = -1UL, /* Restricted during device attach */
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -1760,7 +1473,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
}
/* Invalidate the TLB, just in case */
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
@@ -1782,7 +1494,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
/* Push the button */
- arm_smmu_tlb_sync(smmu);
+ __arm_smmu_tlb_sync(smmu);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
@@ -1816,12 +1528,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* ID0 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
-#ifndef CONFIG_64BIT
- if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
- dev_err(smmu->dev, "\tno v7 descriptor support!\n");
- return -ENODEV;
- }
-#endif
/* Restrict available stages based on module parameter */
if (force_stage == 1)
@@ -1850,6 +1556,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
+ if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
+ smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
+ dev_notice(smmu->dev, "\taddress translation ops\n");
+ }
+
if (id & ID0_CTTW) {
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
dev_notice(smmu->dev, "\tcoherent table walk\n");
@@ -1894,16 +1605,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
/* Check for size mismatch of SMMU address space from mapped region */
- size = 1 <<
- (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+ size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
size *= 2 << smmu->pgshift;
if (smmu->size != size)
dev_warn(smmu->dev,
"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
size, smmu->size);
- smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
- ID1_NUMS2CB_MASK;
+ smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
if (smmu->num_s2_context_banks > smmu->num_context_banks) {
dev_err(smmu->dev, "impossible number of S2 context banks!\n");
@@ -1915,46 +1624,40 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* ID2 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
- smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
-
- /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */
-#ifdef CONFIG_64BIT
- smmu->s2_input_size = min_t(unsigned long, VA_BITS, size);
-#else
- smmu->s2_input_size = min(32UL, size);
-#endif
+ smmu->ipa_size = size;
- /* The stage-2 output mask is also applied for bypass */
+ /* The output mask is also applied for bypass */
size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
- smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size);
+ smmu->pa_size = size;
if (smmu->version == ARM_SMMU_V1) {
- smmu->s1_input_size = 32;
+ smmu->va_size = smmu->ipa_size;
+ size = SZ_4K | SZ_2M | SZ_1G;
} else {
-#ifdef CONFIG_64BIT
size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
- size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
-#else
- size = 32;
+ smmu->va_size = arm_smmu_id_size_to_bits(size);
+#ifndef CONFIG_64BIT
+ smmu->va_size = min(32UL, smmu->va_size);
#endif
- smmu->s1_input_size = size;
-
- if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
- (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
- (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
- dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
- PAGE_SIZE);
- return -ENODEV;
- }
+ size = 0;
+ if (id & ID2_PTFS_4K)
+ size |= SZ_4K | SZ_2M | SZ_1G;
+ if (id & ID2_PTFS_16K)
+ size |= SZ_16K | SZ_32M;
+ if (id & ID2_PTFS_64K)
+ size |= SZ_64K | SZ_512M;
}
+ arm_smmu_ops.pgsize_bitmap &= size;
+ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
+
if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
- smmu->s1_input_size, smmu->s1_output_size);
+ smmu->va_size, smmu->ipa_size);
if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
- smmu->s2_input_size, smmu->s2_output_size);
+ smmu->ipa_size, smmu->pa_size);
return 0;
}
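To see how the PTFS decoding above feeds arm_smmu_ops.pgsize_bitmap, here is a standalone sketch of the same folding. The ID2_PTFS_* bit positions are assumptions based on the SMMUv2 ID2 layout, and the example ID value (4K and 64K granules) is hypothetical:

    #include <stdio.h>

    #define SZ_4K   0x00001000UL
    #define SZ_16K  0x00004000UL
    #define SZ_64K  0x00010000UL
    #define SZ_2M   0x00200000UL
    #define SZ_32M  0x02000000UL
    #define SZ_512M 0x20000000UL
    #define SZ_1G   0x40000000UL

    #define ID2_PTFS_4K  (1 << 12)  /* assumed ID2 bit positions */
    #define ID2_PTFS_16K (1 << 13)
    #define ID2_PTFS_64K (1 << 14)

    int main(void)
    {
        unsigned int id = ID2_PTFS_4K | ID2_PTFS_64K; /* hypothetical SMMU */
        unsigned long size = 0;
        unsigned long pgsize_bitmap = -1UL;   /* as in arm_smmu_ops */

        if (id & ID2_PTFS_4K)
            size |= SZ_4K | SZ_2M | SZ_1G;
        if (id & ID2_PTFS_16K)
            size |= SZ_16K | SZ_32M;
        if (id & ID2_PTFS_64K)
            size |= SZ_64K | SZ_512M;

        pgsize_bitmap &= size;                /* restrict, as at probe time */
        printf("Supported page sizes: 0x%08lx\n", pgsize_bitmap); /* 0x60211000 */
        return 0;
    }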
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 80ac68d884c5..abeedc9a78c2 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -18,22 +18,13 @@
#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__
-#include <linux/init.h>
-#include <linux/iommu.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mm.h>
+#include "fsl_pamu.h"
+
#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/of_platform.h>
-#include <linux/bootmem.h>
#include <linux/genalloc.h>
-#include <asm/io.h>
-#include <asm/bitops.h>
-#include <asm/fsl_guts.h>
-#include "fsl_pamu.h"
+#include <asm/mpc85xx.h>
+#include <asm/fsl_guts.h>
/* define indexes for each operation mapping scenario */
#define OMI_QMAN 0x00
@@ -44,13 +35,13 @@
#define make64(high, low) (((u64)(high) << 32) | (low))
struct pamu_isr_data {
- void __iomem *pamu_reg_base; /* Base address of PAMU regs*/
+ void __iomem *pamu_reg_base; /* Base address of PAMU regs */
unsigned int count; /* The number of PAMUs */
};
static struct paace *ppaact;
static struct paace *spaact;
-static struct ome *omt;
+static struct ome *omt __initdata;
/*
* Table for matching compatible strings, for device tree
@@ -58,14 +49,13 @@ static struct ome *omt;
* "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
* SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
* string would be used.
-*/
-static const struct of_device_id guts_device_ids[] = {
+ */
+static const struct of_device_id guts_device_ids[] __initconst = {
{ .compatible = "fsl,qoriq-device-config-1.0", },
{ .compatible = "fsl,qoriq-device-config-2.0", },
{}
};
-
/*
* Table for matching compatible strings, for device tree
* L3 cache controller node.
@@ -73,7 +63,7 @@ static const struct of_device_id guts_device_ids[] = {
* "fsl,b4860-l3-cache-controller" corresponds to B4 &
* "fsl,p4080-l3-cache-controller" corresponds to other,
* SOCs.
-*/
+ */
static const struct of_device_id l3_device_ids[] = {
{ .compatible = "fsl,t4240-l3-cache-controller", },
{ .compatible = "fsl,b4860-l3-cache-controller", },
@@ -85,7 +75,7 @@ static const struct of_device_id l3_device_ids[] = {
static u32 max_subwindow_count;
/* Pool for fspi allocation */
-struct gen_pool *spaace_pool;
+static struct gen_pool *spaace_pool;
/**
* pamu_get_max_subwin_cnt() - Return the maximum supported
@@ -170,7 +160,7 @@ int pamu_disable_liodn(int liodn)
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
/* Bug if not a power of 2 */
- BUG_ON((addrspace_size & (addrspace_size - 1)));
+ BUG_ON(addrspace_size & (addrspace_size - 1));
/* window size is 2^(WSE+1) bytes */
return fls64(addrspace_size) - 2;
@@ -179,8 +169,8 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
/* Derive the PAACE window count encoding for the subwindow count */
static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
{
- /* window count is 2^(WCE+1) bytes */
- return __ffs(subwindow_cnt) - 1;
+ /* window count is 2^(WCE+1) */
+ return __ffs(subwindow_cnt) - 1;
}
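Both encoders above assume power-of-two inputs: the window size is 2^(WSE+1) bytes and the subwindow count is 2^(WCE+1). A standalone sketch of the arithmetic, emulating fls64() and __ffs() with GCC builtins:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned int fls64(uint64_t x)      /* 1-based highest set bit */
    {
        return 64 - __builtin_clzll(x);
    }

    static unsigned int map_addrspace_size_to_wse(uint64_t size)
    {
        return fls64(size) - 2;                /* size == 2^(WSE+1) bytes */
    }

    static unsigned int map_subwindow_cnt_to_wce(uint32_t cnt)
    {
        return __builtin_ctz(cnt) - 1;         /* cnt == 2^(WCE+1); __ffs analogue */
    }

    int main(void)
    {
        uint64_t win = 1ULL << 20;             /* a 1 MiB window */
        unsigned int wse = map_addrspace_size_to_wse(win);

        printf("WSE = %u, 2^(WSE+1) = 0x%llx\n", wse,
               (unsigned long long)(1ULL << (wse + 1))); /* 19, 0x100000 */
        printf("WCE(16 subwindows) = %u\n", map_subwindow_cnt_to_wce(16)); /* 3 */
        return 0;
    }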
/*
@@ -241,7 +231,7 @@ static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
* If no SPAACE entry is available or the allocator cannot reserve the required
* number of contiguous entries, the function returns ULONG_MAX, indicating a failure.
*
-*/
+ */
static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
{
unsigned long spaace_addr;
@@ -288,9 +278,8 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
}
if (subwin) {
paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace) {
+ if (!paace)
return -ENOENT;
- }
}
set_bf(paace->impl_attr, PAACE_IA_CID, value);
@@ -311,14 +300,12 @@ int pamu_disable_spaace(int liodn, u32 subwin)
}
if (subwin) {
paace = pamu_get_spaace(paace, subwin - 1);
- if (!paace) {
+ if (!paace)
return -ENOENT;
- }
- set_bf(paace->addr_bitfields, PAACE_AF_V,
- PAACE_V_INVALID);
+ set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
} else {
set_bf(paace->addr_bitfields, PAACE_AF_AP,
- PAACE_AP_PERMS_DENIED);
+ PAACE_AP_PERMS_DENIED);
}
mb();
@@ -326,7 +313,6 @@ int pamu_disable_spaace(int liodn, u32 subwin)
return 0;
}
-
/**
* pamu_config_paace() - Sets up PPAACE entry for specified liodn
*
@@ -352,7 +338,8 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
unsigned long fspi;
if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
- pr_debug("window size too small or not a power of two %llx\n", win_size);
+ pr_debug("window size too small or not a power of two %pa\n",
+ &win_size);
return -EINVAL;
}
@@ -362,13 +349,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
}
ppaace = pamu_get_ppaace(liodn);
- if (!ppaace) {
+ if (!ppaace)
return -ENOENT;
- }
/* window size is 2^(WSE+1) bytes */
set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
- map_addrspace_size_to_wse(win_size));
+ map_addrspace_size_to_wse(win_size));
pamu_init_ppaace(ppaace);
@@ -442,7 +428,6 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
{
struct paace *paace;
-
/* setup sub-windows */
if (!subwin_cnt) {
pr_debug("Invalid subwindow count\n");
@@ -510,11 +495,11 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
}
/**
-* get_ome_index() - Returns the index in the operation mapping table
-* for device.
-* @*omi_index: pointer for storing the index value
-*
-*/
+ * get_ome_index() - Returns the index in the operation mapping table
+ * for device.
+ * @*omi_index: pointer for storing the index value
+ *
+ */
void get_ome_index(u32 *omi_index, struct device *dev)
{
if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
@@ -544,9 +529,10 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
node = of_find_matching_node(NULL, l3_device_ids);
if (node) {
- prop = of_get_property(node, "cache-stash-id", 0);
+ prop = of_get_property(node, "cache-stash-id", NULL);
if (!prop) {
- pr_debug("missing cache-stash-id at %s\n", node->full_name);
+ pr_debug("missing cache-stash-id at %s\n",
+ node->full_name);
of_node_put(node);
return ~(u32)0;
}
@@ -570,9 +556,10 @@ found_cpu_node:
/* find the hwnode that represents the cache */
for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
if (stash_dest_hint == cache_level) {
- prop = of_get_property(node, "cache-stash-id", 0);
+ prop = of_get_property(node, "cache-stash-id", NULL);
if (!prop) {
- pr_debug("missing cache-stash-id at %s\n", node->full_name);
+ pr_debug("missing cache-stash-id at %s\n",
+ node->full_name);
of_node_put(node);
return ~(u32)0;
}
@@ -580,10 +567,10 @@ found_cpu_node:
return be32_to_cpup(prop);
}
- prop = of_get_property(node, "next-level-cache", 0);
+ prop = of_get_property(node, "next-level-cache", NULL);
if (!prop) {
pr_debug("can't find next-level-cache at %s\n",
- node->full_name);
+ node->full_name);
of_node_put(node);
return ~(u32)0; /* can't traverse any further */
}
@@ -598,7 +585,7 @@ found_cpu_node:
}
pr_debug("stash dest not found for %d on vcpu %d\n",
- stash_dest_hint, vcpu);
+ stash_dest_hint, vcpu);
return ~(u32)0;
}
@@ -612,7 +599,7 @@ found_cpu_node:
* Memory accesses to QMAN and BMAN private memory need not be coherent, so
* clear the PAACE entry coherency attribute for them.
*/
-static void setup_qbman_paace(struct paace *ppaace, int paace_type)
+static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
{
switch (paace_type) {
case QMAN_PAACE:
@@ -626,7 +613,7 @@ static void setup_qbman_paace(struct paace *ppaace, int paace_type)
case QMAN_PORTAL_PAACE:
set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
ppaace->op_encode.index_ot.omi = OMI_QMAN;
- /*Set DQRR and Frame stashing for the L3 cache */
+ /* Set DQRR and Frame stashing for the L3 cache */
set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
break;
case BMAN_PAACE:
@@ -679,7 +666,7 @@ static void __init setup_omt(struct ome *omt)
* Get the maximum number of PAACT table entries
* and subwindows supported by PAMU
*/
-static void get_pamu_cap_values(unsigned long pamu_reg_base)
+static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
{
u32 pc_val;
@@ -689,9 +676,9 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base)
}
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
- phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
- phys_addr_t omt_phys)
+static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+ phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+ phys_addr_t omt_phys)
{
u32 *pc;
struct pamu_mmap_regs *pamu_regs;
@@ -727,7 +714,7 @@ int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
*/
out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
- PAMU_ACCESS_VIOLATION_ENABLE);
+ PAMU_ACCESS_VIOLATION_ENABLE);
out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
return 0;
}
@@ -757,9 +744,9 @@ static void __init setup_liodns(void)
ppaace->wbah = 0;
set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
set_bf(ppaace->impl_attr, PAACE_IA_ATM,
- PAACE_ATM_NO_XLATE);
+ PAACE_ATM_NO_XLATE);
set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
- PAACE_AP_PERMS_ALL);
+ PAACE_AP_PERMS_ALL);
if (of_device_is_compatible(node, "fsl,qman-portal"))
setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
if (of_device_is_compatible(node, "fsl,qman"))
@@ -772,7 +759,7 @@ static void __init setup_liodns(void)
}
}
-irqreturn_t pamu_av_isr(int irq, void *arg)
+static irqreturn_t pamu_av_isr(int irq, void *arg)
{
struct pamu_isr_data *data = arg;
phys_addr_t phys;
@@ -792,14 +779,16 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
pr_emerg("AVS1=%08x\n", avs1);
pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
- pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
- in_be32(p + PAMU_AVAL)));
+ pr_emerg("AVA=%016llx\n",
+ make64(in_be32(p + PAMU_AVAH),
+ in_be32(p + PAMU_AVAL)));
pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
- pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
- in_be32(p + PAMU_POEAL)));
+ pr_emerg("POEA=%016llx\n",
+ make64(in_be32(p + PAMU_POEAH),
+ in_be32(p + PAMU_POEAL)));
phys = make64(in_be32(p + PAMU_POEAH),
- in_be32(p + PAMU_POEAL));
+ in_be32(p + PAMU_POEAL));
/* Assume that POEA points to a PAACE */
if (phys) {
@@ -807,11 +796,12 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
/* Only the first four words are relevant */
for (j = 0; j < 4; j++)
- pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j));
+ pr_emerg("PAACE[%u]=%08x\n",
+ j, in_be32(paace + j));
}
/* clear access violation condition */
- out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
+ out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
BUG_ON(!paace);
/* check if we got a violation for a disabled LIODN */
@@ -827,13 +817,13 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
/* Disable the LIODN */
ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
BUG_ON(ret);
- pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
+ pr_emerg("Disabling liodn %x\n",
+ avs1 >> PAMU_AVS1_LIODN_SHIFT);
}
out_be32((p + PAMU_PICS), pics);
}
}
-
return IRQ_HANDLED;
}
@@ -952,7 +942,7 @@ static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
}
if (i == 0 || i == num_laws) {
- /* This should never happen*/
+ /* This should never happen */
ret = -ENOENT;
goto error;
}
@@ -998,26 +988,27 @@ error:
static const struct {
u32 svr;
u32 port_id;
-} port_id_map[] = {
- {0x82100010, 0xFF000000}, /* P2040 1.0 */
- {0x82100011, 0xFF000000}, /* P2040 1.1 */
- {0x82100110, 0xFF000000}, /* P2041 1.0 */
- {0x82100111, 0xFF000000}, /* P2041 1.1 */
- {0x82110310, 0xFF000000}, /* P3041 1.0 */
- {0x82110311, 0xFF000000}, /* P3041 1.1 */
- {0x82010020, 0xFFF80000}, /* P4040 2.0 */
- {0x82000020, 0xFFF80000}, /* P4080 2.0 */
- {0x82210010, 0xFC000000}, /* P5010 1.0 */
- {0x82210020, 0xFC000000}, /* P5010 2.0 */
- {0x82200010, 0xFC000000}, /* P5020 1.0 */
- {0x82050010, 0xFF800000}, /* P5021 1.0 */
- {0x82040010, 0xFF800000}, /* P5040 1.0 */
+} port_id_map[] __initconst = {
+ {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
+ {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
+ {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
+ {(SVR_P2041 << 8) | 0x11, 0xFF000000}, /* P2041 1.1 */
+ {(SVR_P3041 << 8) | 0x10, 0xFF000000}, /* P3041 1.0 */
+ {(SVR_P3041 << 8) | 0x11, 0xFF000000}, /* P3041 1.1 */
+ {(SVR_P4040 << 8) | 0x20, 0xFFF80000}, /* P4040 2.0 */
+ {(SVR_P4080 << 8) | 0x20, 0xFFF80000}, /* P4080 2.0 */
+ {(SVR_P5010 << 8) | 0x10, 0xFC000000}, /* P5010 1.0 */
+ {(SVR_P5010 << 8) | 0x20, 0xFC000000}, /* P5010 2.0 */
+ {(SVR_P5020 << 8) | 0x10, 0xFC000000}, /* P5020 1.0 */
+ {(SVR_P5021 << 8) | 0x10, 0xFF800000}, /* P5021 1.0 */
+ {(SVR_P5040 << 8) | 0x10, 0xFF800000}, /* P5040 1.0 */
};
#define SVR_SECURITY 0x80000 /* The Security (E) bit */
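The symbolic rewrite of port_id_map can be sanity-checked by recomputing the raw literals it replaces. A standalone sketch, assuming the SVR_* values from asm/mpc85xx.h (for instance SVR_P2040 = 0x821000):

    #include <stdio.h>

    #define SVR_P2040 0x821000  /* assumed values from asm/mpc85xx.h */
    #define SVR_P2041 0x821001

    int main(void)
    {
        /* Should reproduce the old raw literals 0x82100010 and 0x82100111 */
        printf("P2040 1.0: 0x%08x\n", (SVR_P2040 << 8) | 0x10);
        printf("P2041 1.1: 0x%08x\n", (SVR_P2041 << 8) | 0x11);
        return 0;
    }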
static int __init fsl_pamu_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
void __iomem *pamu_regs = NULL;
struct ccsr_guts __iomem *guts_regs = NULL;
u32 pamubypenr, pamu_counter;
@@ -1042,22 +1033,21 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
* NOTE : All PAMUs share the same LIODN tables.
*/
- pamu_regs = of_iomap(pdev->dev.of_node, 0);
+ pamu_regs = of_iomap(dev->of_node, 0);
if (!pamu_regs) {
- dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
+ dev_err(dev, "ioremap of PAMU node failed\n");
return -ENOMEM;
}
- of_get_address(pdev->dev.of_node, 0, &size, NULL);
+ of_get_address(dev->of_node, 0, &size, NULL);
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ irq = irq_of_parse_and_map(dev->of_node, 0);
if (irq == NO_IRQ) {
- dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
+ dev_warn(dev, "no interrupts listed in PAMU node\n");
goto error;
}
- data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
- dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
ret = -ENOMEM;
goto error;
}
@@ -1067,15 +1057,14 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* The ISR needs access to the regs, so we won't iounmap them */
ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
if (ret < 0) {
- dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
- ret, irq);
+ dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq);
goto error;
}
guts_node = of_find_matching_node(NULL, guts_device_ids);
if (!guts_node) {
- dev_err(&pdev->dev, "could not find GUTS node %s\n",
- pdev->dev.of_node->full_name);
+ dev_err(dev, "could not find GUTS node %s\n",
+ dev->of_node->full_name);
ret = -ENODEV;
goto error;
}
@@ -1083,7 +1072,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
guts_regs = of_iomap(guts_node, 0);
of_node_put(guts_node);
if (!guts_regs) {
- dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
+ dev_err(dev, "ioremap of GUTS node failed\n");
ret = -ENODEV;
goto error;
}
@@ -1103,7 +1092,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
- dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
+ dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n");
ret = -ENOMEM;
goto error;
}
@@ -1113,7 +1102,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* Make sure the memory is naturally aligned */
if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
- dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
+ dev_err(dev, "PAACT/OMT block is unaligned\n");
ret = -ENOMEM;
goto error;
}
@@ -1121,8 +1110,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
- dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
- (unsigned long long) ppaact_phys);
+ dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys);
/* Check to see if we need to implement the work-around on this SOC */
@@ -1130,21 +1118,19 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
csd_port_id = port_id_map[i].port_id;
- dev_dbg(&pdev->dev, "found matching SVR %08x\n",
+ dev_dbg(dev, "found matching SVR %08x\n",
port_id_map[i].svr);
break;
}
}
if (csd_port_id) {
- dev_dbg(&pdev->dev, "creating coherency subdomain at address "
- "0x%llx, size %zu, port id 0x%08x", ppaact_phys,
- mem_size, csd_port_id);
+ dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x",
+ &ppaact_phys, mem_size, csd_port_id);
ret = create_csd(ppaact_phys, mem_size, csd_port_id);
if (ret) {
- dev_err(&pdev->dev, "could not create coherence "
- "subdomain\n");
+ dev_err(dev, "could not create coherence subdomain\n");
return ret;
}
}
@@ -1155,7 +1141,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
if (!spaace_pool) {
ret = -ENOMEM;
- dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n");
+ dev_err(dev, "Failed to allocate spaace gen pool\n");
goto error;
}
@@ -1168,9 +1154,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
- pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
+ pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off;
setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
- spaact_phys, omt_phys);
+ spaact_phys, omt_phys);
/* Disable PAMU bypass for this PAMU */
pamubypenr &= ~pamu_counter;
}
@@ -1182,7 +1168,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
iounmap(guts_regs);
- /* Enable DMA for the LIODNs in the device tree*/
+ /* Enable DMA for the LIODNs in the device tree */
setup_liodns();
@@ -1214,17 +1200,7 @@ error:
return ret;
}
-static const struct of_device_id fsl_of_pamu_ids[] = {
- {
- .compatible = "fsl,p4080-pamu",
- },
- {
- .compatible = "fsl,pamu",
- },
- {},
-};
-
-static struct platform_driver fsl_of_pamu_driver = {
+static struct platform_driver fsl_of_pamu_driver __initdata = {
.driver = {
.name = "fsl-of-pamu",
},
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index 8fc1a125b16e..aab723f91f12 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -19,13 +19,15 @@
#ifndef __FSL_PAMU_H
#define __FSL_PAMU_H
+#include <linux/iommu.h>
+
#include <asm/fsl_pamu_stash.h>
/* Bit Field macros
* v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
*/
-#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
-#define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT))
+#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << m##_SHIFT) & (m)))
+#define get_bf(v, m) (((v) & (m)) >> m##_SHIFT)
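The token pasting in set_bf()/get_bf() means every field mask must come with a companion <mask>_SHIFT define, which is the convention throughout the PAMU headers. A standalone sketch with a made-up field:

    #include <stdio.h>
    #include <stdint.h>

    #define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << m##_SHIFT) & (m)))
    #define get_bf(v, m)    (((v) & (m)) >> m##_SHIFT)

    #define DEMO_FIELD       0x00000f00 /* hypothetical 4-bit field, bits 11:8 */
    #define DEMO_FIELD_SHIFT 8          /* companion define the macros require */

    int main(void)
    {
        uint32_t reg = 0xffffffff;

        set_bf(reg, DEMO_FIELD, 0x5);   /* clear the field, then load 5 */
        printf("reg = 0x%08x, field = %u\n", reg, get_bf(reg, DEMO_FIELD));
        /* prints reg = 0xfffff5ff, field = 5 */
        return 0;
    }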
/* PAMU CCSR space */
#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */
@@ -65,7 +67,7 @@ struct pamu_mmap_regs {
#define PAMU_AVS1_GCV 0x2000
#define PAMU_AVS1_PDV 0x4000
#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
- | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
+ | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
#define PAMU_AVS1_LIODN_SHIFT 16
#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400
@@ -198,8 +200,7 @@ struct pamu_mmap_regs {
#define PAACE_ATM_NO_XLATE 0x00
#define PAACE_ATM_WINDOW_XLATE 0x01
#define PAACE_ATM_PAGE_XLATE 0x02
-#define PAACE_ATM_WIN_PG_XLATE \
- (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
+#define PAACE_ATM_WIN_PG_XLATE (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
#define PAACE_OTM_NO_XLATE 0x00
#define PAACE_OTM_IMMEDIATE 0x01
#define PAACE_OTM_INDEXED 0x02
@@ -219,7 +220,7 @@ struct pamu_mmap_regs {
#define PAACE_TCEF_FORMAT0_8B 0x00
#define PAACE_TCEF_FORMAT1_RSVD 0x01
/*
- * Hard coded value for the PAACT size to accomodate
+ * Hard coded value for the PAACT size to accommodate
* maximum LIODN value generated by u-boot.
*/
#define PAACE_NUMBER_ENTRIES 0x500
@@ -332,7 +333,7 @@ struct paace {
#define NUM_MOE 128
struct ome {
u8 moe[NUM_MOE];
-} __attribute__((packed));
+} __packed;
#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c828f80d48b0..ceebd287b660 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -19,26 +19,10 @@
#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
-#include <linux/init.h>
-#include <linux/iommu.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/of_platform.h>
-#include <linux/bootmem.h>
-#include <linux/err.h>
-#include <asm/io.h>
-#include <asm/bitops.h>
-
-#include <asm/pci-bridge.h>
-#include <sysdev/fsl_pci.h>
-
#include "fsl_pamu_domain.h"
+#include <sysdev/fsl_pci.h>
+
/*
* Global spinlock that needs to be held while
* configuring PAMU.
@@ -51,23 +35,21 @@ static DEFINE_SPINLOCK(device_domain_lock);
static int __init iommu_init_mempool(void)
{
-
fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
- sizeof(struct fsl_dma_domain),
- 0,
- SLAB_HWCACHE_ALIGN,
-
- NULL);
+ sizeof(struct fsl_dma_domain),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
if (!fsl_pamu_domain_cache) {
pr_debug("Couldn't create fsl iommu_domain cache\n");
return -ENOMEM;
}
iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
- sizeof(struct device_domain_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
+ sizeof(struct device_domain_info),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
if (!iommu_devinfo_cache) {
pr_debug("Couldn't create devinfo cache\n");
kmem_cache_destroy(fsl_pamu_domain_cache);
@@ -80,8 +62,7 @@ static int __init iommu_init_mempool(void)
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
u32 win_cnt = dma_domain->win_cnt;
- struct dma_window *win_ptr =
- &dma_domain->win_arr[0];
+ struct dma_window *win_ptr = &dma_domain->win_arr[0];
struct iommu_domain_geometry *geom;
geom = &dma_domain->iommu_domain->geometry;
@@ -103,22 +84,20 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
}
if (win_ptr->valid)
- return (win_ptr->paddr + (iova & (win_ptr->size - 1)));
+ return win_ptr->paddr + (iova & (win_ptr->size - 1));
return 0;
}
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
- struct dma_window *sub_win_ptr =
- &dma_domain->win_arr[0];
+ struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
int i, ret;
unsigned long rpn, flags;
for (i = 0; i < dma_domain->win_cnt; i++) {
if (sub_win_ptr[i].valid) {
- rpn = sub_win_ptr[i].paddr >>
- PAMU_PAGE_SHIFT;
+ rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
sub_win_ptr[i].size,
@@ -130,7 +109,7 @@ static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
sub_win_ptr[i].prot);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
+ pr_debug("SPAACE configuration failed for liodn %d\n",
liodn);
return ret;
}
@@ -156,8 +135,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
0, wnd->prot);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret)
- pr_debug("PAMU PAACE configuration failed for liodn %d\n",
- liodn);
+ pr_debug("PAACE configuration failed for liodn %d\n", liodn);
return ret;
}
@@ -169,7 +147,6 @@ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
return map_subwins(liodn, dma_domain);
else
return map_win(liodn, dma_domain);
-
}
/* Update window/subwindow mapping for the LIODN */
@@ -190,7 +167,8 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
(wnd_nr > 0) ? 1 : 0,
wnd->prot);
if (ret)
- pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
+ pr_debug("Subwindow reconfiguration failed for liodn %d\n",
+ liodn);
} else {
phys_addr_t wnd_addr;
@@ -200,10 +178,11 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
wnd->size,
~(u32)0,
wnd->paddr >> PAMU_PAGE_SHIFT,
- dma_domain->snoop_id, dma_domain->stash_id,
- 0, wnd->prot);
+ dma_domain->snoop_id, dma_domain->stash_id,
+ 0, wnd->prot);
if (ret)
- pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
+ pr_debug("Window reconfiguration failed for liodn %d\n",
+ liodn);
}
spin_unlock_irqrestore(&iommu_lock, flags);
@@ -212,14 +191,15 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
}
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
- u32 val)
+ u32 val)
{
int ret = 0, i;
unsigned long flags;
spin_lock_irqsave(&iommu_lock, flags);
if (!dma_domain->win_arr) {
- pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
+ pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
+ liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return -EINVAL;
}
@@ -227,7 +207,8 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
for (i = 0; i < dma_domain->win_cnt; i++) {
ret = pamu_update_paace_stash(liodn, i, val);
if (ret) {
- pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn);
+ pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
+ i, liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return ret;
}
@@ -240,9 +221,9 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
- struct fsl_dma_domain *dma_domain,
- struct iommu_domain_geometry *geom_attr,
- u32 win_cnt)
+ struct fsl_dma_domain *dma_domain,
+ struct iommu_domain_geometry *geom_attr,
+ u32 win_cnt)
{
phys_addr_t window_addr, window_size;
phys_addr_t subwin_size;
@@ -268,7 +249,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
dma_domain->stash_id, win_cnt, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt);
+ pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
+ liodn, win_cnt);
return ret;
}
@@ -285,7 +267,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
0, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
- pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
+ pr_debug("SPAACE configuration failed for liodn %d\n",
+ liodn);
return ret;
}
}
@@ -301,13 +284,13 @@ static int check_size(u64 size, dma_addr_t iova)
* to PAMU page size.
*/
if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
- pr_debug("%s: size too small or not a power of two\n", __func__);
+ pr_debug("Size too small or not a power of two\n");
return -EINVAL;
}
- /* iova must be page size aligned*/
+ /* iova must be page size aligned */
if (iova & (size - 1)) {
- pr_debug("%s: address is not aligned with window size\n", __func__);
+ pr_debug("Address is not aligned with window size\n");
return -EINVAL;
}
@@ -396,16 +379,15 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
if (!dev->archdata.iommu_domain)
dev->archdata.iommu_domain = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
-
}
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+ dma_addr_t iova)
{
struct fsl_dma_domain *dma_domain = domain->priv;
- if ((iova < domain->geometry.aperture_start) ||
- iova > (domain->geometry.aperture_end))
+ if (iova < domain->geometry.aperture_start ||
+ iova > domain->geometry.aperture_end)
return 0;
return get_phys_addr(dma_domain, iova);
@@ -460,7 +442,7 @@ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
list_for_each_entry(info, &dma_domain->devices, link) {
ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
- geom_attr, win_cnt);
+ geom_attr, win_cnt);
if (ret)
break;
}
@@ -543,7 +525,6 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
}
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
}
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -576,7 +557,7 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
if (size > win_size) {
- pr_debug("Invalid window size \n");
+ pr_debug("Invalid window size\n");
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -EINVAL;
}
@@ -622,8 +603,8 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
* and window mappings.
*/
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
- struct device *dev, const u32 *liodn,
- int num)
+ struct device *dev, const u32 *liodn,
+ int num)
{
unsigned long flags;
struct iommu_domain *domain = dma_domain->iommu_domain;
@@ -632,11 +613,10 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
spin_lock_irqsave(&dma_domain->domain_lock, flags);
for (i = 0; i < num; i++) {
-
/* Ensure that LIODN value is valid */
if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
pr_debug("Invalid liodn %d, attach device failed for %s\n",
- liodn[i], dev->of_node->full_name);
+ liodn[i], dev->of_node->full_name);
ret = -EINVAL;
break;
}
@@ -649,9 +629,9 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
*/
if (dma_domain->win_arr) {
u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
+
ret = pamu_set_liodn(liodn[i], dev, dma_domain,
- &domain->geometry,
- win_cnt);
+ &domain->geometry, win_cnt);
if (ret)
break;
if (dma_domain->mapped) {
@@ -698,19 +678,18 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
if (liodn) {
liodn_cnt = len / sizeof(u32);
- ret = handle_attach_device(dma_domain, dev,
- liodn, liodn_cnt);
+ ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
} else {
pr_debug("missing fsl,liodn property at %s\n",
- dev->of_node->full_name);
- ret = -EINVAL;
+ dev->of_node->full_name);
+ ret = -EINVAL;
}
return ret;
}
static void fsl_pamu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev)
{
struct fsl_dma_domain *dma_domain = domain->priv;
const u32 *prop;
@@ -738,7 +717,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
detach_device(dev, dma_domain);
else
pr_debug("missing fsl,liodn property at %s\n",
- dev->of_node->full_name);
+ dev->of_node->full_name);
}
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
@@ -754,10 +733,10 @@ static int configure_domain_geometry(struct iommu_domain *domain, void *data)
* DMA outside of the geometry.
*/
if (check_size(geom_size, geom_attr->aperture_start) ||
- !geom_attr->force_aperture) {
- pr_debug("Invalid PAMU geometry attributes\n");
- return -EINVAL;
- }
+ !geom_attr->force_aperture) {
+ pr_debug("Invalid PAMU geometry attributes\n");
+ return -EINVAL;
+ }
spin_lock_irqsave(&dma_domain->domain_lock, flags);
if (dma_domain->enabled) {
@@ -786,7 +765,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
spin_lock_irqsave(&dma_domain->domain_lock, flags);
memcpy(&dma_domain->dma_stash, stash_attr,
- sizeof(struct pamu_stash_attribute));
+ sizeof(struct pamu_stash_attribute));
dma_domain->stash_id = get_stash_id(stash_attr->cache,
stash_attr->cpu);
@@ -803,7 +782,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
return ret;
}
-/* Configure domain dma state i.e. enable/disable DMA*/
+/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
struct device_domain_info *info;
@@ -819,8 +798,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
}
dma_domain->enabled = enable;
- list_for_each_entry(info, &dma_domain->devices,
- link) {
+ list_for_each_entry(info, &dma_domain->devices, link) {
ret = (enable) ? pamu_enable_liodn(info->liodn) :
pamu_disable_liodn(info->liodn);
if (ret)
@@ -833,12 +811,11 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
}
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
+ enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
int ret = 0;
-
switch (attr_type) {
case DOMAIN_ATTR_GEOMETRY:
ret = configure_domain_geometry(domain, data);
@@ -853,22 +830,21 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
break;
- };
+ }
return ret;
}
static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
- enum iommu_attr attr_type, void *data)
+ enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
int ret = 0;
-
switch (attr_type) {
case DOMAIN_ATTR_FSL_PAMU_STASH:
- memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash,
- sizeof(struct pamu_stash_attribute));
+ memcpy(data, &dma_domain->dma_stash,
+ sizeof(struct pamu_stash_attribute));
break;
case DOMAIN_ATTR_FSL_PAMU_ENABLE:
*(int *)data = dma_domain->enabled;
@@ -880,7 +856,7 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
break;
- };
+ }
return ret;
}
@@ -903,11 +879,8 @@ static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
/* Check the PCI controller version number by reading the BRR1 register */
version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
version &= PCI_FSL_BRR1_VER;
- /* If PCI controller version is >= 0x204 we can partition endpoints*/
- if (version >= 0x204)
- return 1;
-
- return 0;
+ /* If PCI controller version is >= 0x204 we can partition endpoints */
+ return version >= 0x204;
}
/* Get iommu group information from peer devices or devices on the parent bus */
@@ -968,8 +941,9 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
if (pci_ctl->parent->iommu_group) {
group = get_device_iommu_group(pci_ctl->parent);
iommu_group_remove_device(pci_ctl->parent);
- } else
+ } else {
group = get_shared_pci_device_group(pdev);
+ }
}
if (!group)
@@ -1055,11 +1029,12 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
}
ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
- ((w_count > 1) ? w_count : 0));
+ w_count > 1 ? w_count : 0);
if (!ret) {
kfree(dma_domain->win_arr);
- dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
- w_count, GFP_ATOMIC);
+ dma_domain->win_arr = kcalloc(w_count,
+ sizeof(*dma_domain->win_arr),
+ GFP_ATOMIC);
if (!dma_domain->win_arr) {
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -ENOMEM;
@@ -1095,7 +1070,7 @@ static const struct iommu_ops fsl_pamu_ops = {
.remove_device = fsl_pamu_remove_device,
};
-int pamu_domain_init(void)
+int __init pamu_domain_init(void)
{
int ret = 0;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
new file mode 100644
index 000000000000..5a500edf00cc
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -0,0 +1,986 @@
+/*
+ * CPU-agnostic ARM page table allocator.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
+
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "io-pgtable.h"
+
+#define ARM_LPAE_MAX_ADDR_BITS 48
+#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
+#define ARM_LPAE_MAX_LEVELS 4
+
+/* Struct accessors */
+#define io_pgtable_to_data(x) \
+ container_of((x), struct arm_lpae_io_pgtable, iop)
+
+#define io_pgtable_ops_to_pgtable(x) \
+ container_of((x), struct io_pgtable, ops)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+/*
+ * For consistency with the architecture, we always consider
+ * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
+ */
+#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
+
+/*
+ * Calculate the right shift amount to get to the portion describing level l
+ * in a virtual address mapped by the pagetable in d.
+ */
+#define ARM_LPAE_LVL_SHIFT(l,d) \
+ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
+ * (d)->bits_per_level) + (d)->pg_shift)
+
+#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift)
+
+/*
+ * Calculate the index at level l used to map virtual address a using the
+ * pagetable in d.
+ */
+#define ARM_LPAE_PGD_IDX(l,d) \
+ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
+
+#define ARM_LPAE_LVL_IDX(a,l,d) \
+ (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
+ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
+
+/* Calculate the block/page mapping size at level l for pagetable in d. */
+#define ARM_LPAE_BLOCK_SIZE(l,d) \
+ (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
+ ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
+
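As a concrete instance of these macros, take the common AArch64 configuration of a 4KB granule and 48-bit VAs: pg_shift = 12, bits_per_level = 9, levels = 4, so the walk starts at level 0 and the per-level shifts come out to 39, 30, 21 and 12. A standalone sketch of the same arithmetic (the configuration values are assumed, not read from a live pagetable):

    #include <stdio.h>

    #define MAX_LEVELS     4
    #define PG_SHIFT       12  /* 4KB granule */
    #define BITS_PER_LEVEL 9   /* pg_shift - ilog2(sizeof(u64)) */
    #define LEVELS         4   /* 48-bit VAs */
    #define START_LVL      (MAX_LEVELS - LEVELS)

    static unsigned int lvl_shift(int l)
    {
        return (LEVELS - (l - START_LVL + 1)) * BITS_PER_LEVEL + PG_SHIFT;
    }

    static unsigned int lvl_idx(unsigned long long iova, int l)
    {
        return (iova >> lvl_shift(l)) & ((1U << BITS_PER_LEVEL) - 1);
    }

    int main(void)
    {
        unsigned long long iova = 0x0000ffee12345000ULL; /* arbitrary */
        int l;

        for (l = START_LVL; l < MAX_LEVELS; l++)
            printf("level %d: shift %2u, index %3u, block size 0x%llx\n",
                   l, lvl_shift(l), lvl_idx(iova, l),
                   1ULL << (3 + (MAX_LEVELS - l) * BITS_PER_LEVEL));
        return 0;
    }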
+/* Page table bits */
+#define ARM_LPAE_PTE_TYPE_SHIFT 0
+#define ARM_LPAE_PTE_TYPE_MASK 0x3
+
+#define ARM_LPAE_PTE_TYPE_BLOCK 1
+#define ARM_LPAE_PTE_TYPE_TABLE 3
+#define ARM_LPAE_PTE_TYPE_PAGE 3
+
+#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
+#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
+#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
+#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
+#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
+#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
+#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
+#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
+
+#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
+/* Ignore the contiguous bit for block splitting */
+#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
+#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
+ ARM_LPAE_PTE_ATTR_HI_MASK)
+
+/* Stage-1 PTE */
+#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
+#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
+
+/* Stage-2 PTE */
+#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
+#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
+#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
+#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
+
+/* Register bits */
+#define ARM_32_LPAE_TCR_EAE (1 << 31)
+#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
+
+#define ARM_LPAE_TCR_TG0_4K (0 << 14)
+#define ARM_LPAE_TCR_TG0_64K (1 << 14)
+#define ARM_LPAE_TCR_TG0_16K (2 << 14)
+
+#define ARM_LPAE_TCR_SH0_SHIFT 12
+#define ARM_LPAE_TCR_SH0_MASK 0x3
+#define ARM_LPAE_TCR_SH_NS 0
+#define ARM_LPAE_TCR_SH_OS 2
+#define ARM_LPAE_TCR_SH_IS 3
+
+#define ARM_LPAE_TCR_ORGN0_SHIFT 10
+#define ARM_LPAE_TCR_IRGN0_SHIFT 8
+#define ARM_LPAE_TCR_RGN_MASK 0x3
+#define ARM_LPAE_TCR_RGN_NC 0
+#define ARM_LPAE_TCR_RGN_WBWA 1
+#define ARM_LPAE_TCR_RGN_WT 2
+#define ARM_LPAE_TCR_RGN_WB 3
+
+#define ARM_LPAE_TCR_SL0_SHIFT 6
+#define ARM_LPAE_TCR_SL0_MASK 0x3
+
+#define ARM_LPAE_TCR_T0SZ_SHIFT 0
+#define ARM_LPAE_TCR_SZ_MASK 0xf
+
+#define ARM_LPAE_TCR_PS_SHIFT 16
+#define ARM_LPAE_TCR_PS_MASK 0x7
+
+#define ARM_LPAE_TCR_IPS_SHIFT 32
+#define ARM_LPAE_TCR_IPS_MASK 0x7
+
+#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
+#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
+#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
+#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
+#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
+#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
+
+#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
+#define ARM_LPAE_MAIR_ATTR_MASK 0xff
+#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
+#define ARM_LPAE_MAIR_ATTR_NC 0x44
+#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
+#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
+#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
+#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+
+/* IOPTE accessors */
+#define iopte_deref(pte,d) \
+ (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
+ & ~((1ULL << (d)->pg_shift) - 1)))
+
+#define iopte_type(pte,l) \
+ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
+
+#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
+
+#define iopte_leaf(pte,l) \
+ (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
+
+#define iopte_to_pfn(pte,d) \
+ (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
+
+#define pfn_to_iopte(pfn,d) \
+ (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
+
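+/*
+ * pg_shift is log2 of the page granule, bits_per_level is the number of
+ * VA bits resolved by one table level, and pgd_size is the byte size of
+ * the top-level table (which need not equal one granule).
+ */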
+struct arm_lpae_io_pgtable {
+ struct io_pgtable iop;
+
+ int levels;
+ size_t pgd_size;
+ unsigned long pg_shift;
+ unsigned long bits_per_level;
+
+ void *pgd;
+};
+
+typedef u64 arm_lpae_iopte;
+
+static bool selftest_running = false;
+
+static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr,
+ arm_lpae_iopte prot, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte pte = prot;
+
+ /* We require an unmap first */
+ if (iopte_leaf(*ptep, lvl)) {
+ WARN_ON(!selftest_running);
+ return -EEXIST;
+ }
+
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_LPAE_PTE_NS;
+
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ pte |= ARM_LPAE_PTE_TYPE_PAGE;
+ else
+ pte |= ARM_LPAE_PTE_TYPE_BLOCK;
+
+ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+ pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
+
+ *ptep = pte;
+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+ return 0;
+}
+
+static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
+ phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
+ int lvl, arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte *cptep, pte;
+ void *cookie = data->iop.cookie;
+ size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ /* Find our entry at the current level */
+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+
+ /* If we can install a leaf entry at this level, then do so */
+ if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+ return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+
+ /* We can't allocate tables at the final level */
+ if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
+ return -EINVAL;
+
+ /* Grab a pointer to the next level */
+ pte = *ptep;
+ if (!pte) {
+ cptep = alloc_pages_exact(1UL << data->pg_shift,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!cptep)
+ return -ENOMEM;
+
+ data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
+ cookie);
+ pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_LPAE_PTE_NSTABLE;
+ *ptep = pte;
+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ } else {
+ cptep = iopte_deref(pte, data);
+ }
+
+ /* Rinse, repeat */
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+}
+
+static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
+ int prot)
+{
+ arm_lpae_iopte pte;
+
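+ /*
+ * Stage-1 entries use the AP[2:1] permission bits and a MAIR attribute
+ * index; stage-2 entries use HAP read/write permissions and direct
+ * memory attributes instead.
+ */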
+ if (data->iop.fmt == ARM_64_LPAE_S1 ||
+ data->iop.fmt == ARM_32_LPAE_S1) {
+ pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+
+ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
+ pte |= ARM_LPAE_PTE_AP_RDONLY;
+
+ if (prot & IOMMU_CACHE)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ } else {
+ pte = ARM_LPAE_PTE_HAP_FAULT;
+ if (prot & IOMMU_READ)
+ pte |= ARM_LPAE_PTE_HAP_READ;
+ if (prot & IOMMU_WRITE)
+ pte |= ARM_LPAE_PTE_HAP_WRITE;
+ if (prot & IOMMU_CACHE)
+ pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
+ else
+ pte |= ARM_LPAE_PTE_MEMATTR_NC;
+ }
+
+ if (prot & IOMMU_NOEXEC)
+ pte |= ARM_LPAE_PTE_XN;
+
+ return pte;
+}
+
+static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int iommu_prot)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+ arm_lpae_iopte prot;
+
+ /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
+ prot = arm_lpae_prot_to_pte(data, iommu_prot);
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+}
+
+static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte *start, *end;
+ unsigned long table_size;
+
+ /* Only leaf entries at the last level */
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ return;
+
+ if (lvl == ARM_LPAE_START_LVL(data))
+ table_size = data->pgd_size;
+ else
+ table_size = 1UL << data->pg_shift;
+
+ start = ptep;
+ end = (void *)ptep + table_size;
+
+ while (ptep != end) {
+ arm_lpae_iopte pte = *ptep++;
+
+ if (!pte || iopte_leaf(pte, lvl))
+ continue;
+
+ __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
+ }
+
+ free_pages_exact(start, table_size);
+}
+
+static void arm_lpae_free_pgtable(struct io_pgtable *iop)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
+
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ kfree(data);
+}
+
+static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size,
+ arm_lpae_iopte prot, int lvl,
+ arm_lpae_iopte *ptep, size_t blk_size)
+{
+ unsigned long blk_start, blk_end;
+ phys_addr_t blk_paddr;
+ arm_lpae_iopte table = 0;
+ void *cookie = data->iop.cookie;
+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+
+ blk_start = iova & ~(blk_size - 1);
+ blk_end = blk_start + blk_size;
+ blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
+
+ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
+ arm_lpae_iopte *tablep;
+
+ /* Leave a hole for the region being unmapped */
+ if (blk_start == iova)
+ continue;
+
+ /* __arm_lpae_map expects a pointer to the start of the table */
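+ /* Subtracting the index makes __arm_lpae_map land on our local PTE */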
+ tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
+ if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
+ tablep) < 0) {
+ if (table) {
+ /* Free the table we allocated */
+ tablep = iopte_deref(table, data);
+ __arm_lpae_free_pgtable(data, lvl + 1, tablep);
+ }
+ return 0; /* Bytes unmapped */
+ }
+ }
+
+ *ptep = table;
+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+ iova &= ~(blk_size - 1);
+ tlb->tlb_add_flush(iova, blk_size, true, cookie);
+ return size;
+}
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_lpae_iopte *ptep)
+{
+ arm_lpae_iopte pte;
+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+ void *cookie = data->iop.cookie;
+ size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
+ pte = *ptep;
+
+ /* Something went horribly wrong and we ran out of page table */
+ if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
+ return 0;
+
+ /* If the size matches this level, we're in the right place */
+ if (size == blk_size) {
+ *ptep = 0;
+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+
+ if (!iopte_leaf(pte, lvl)) {
+ /* Also flush any partial walks */
+ tlb->tlb_add_flush(iova, size, false, cookie);
+ tlb->tlb_sync(data->iop.cookie);
+ ptep = iopte_deref(pte, data);
+ __arm_lpae_free_pgtable(data, lvl + 1, ptep);
+ } else {
+ tlb->tlb_add_flush(iova, size, true, cookie);
+ }
+
+ return size;
+ } else if (iopte_leaf(pte, lvl)) {
+ /*
+ * Insert a table at the next level to map the old region,
+ * minus the part we want to unmap
+ */
+ return arm_lpae_split_blk_unmap(data, iova, size,
+ iopte_prot(pte), lvl, ptep,
+ blk_size);
+ }
+
+ /* Keep on walkin' */
+ ptep = iopte_deref(pte, data);
+ return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+}
+
+static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size)
+{
+ size_t unmapped;
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable *iop = &data->iop;
+ arm_lpae_iopte *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+ unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
+ if (unmapped)
+ iop->cfg.tlb->tlb_sync(iop->cookie);
+
+ return unmapped;
+}
+
+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_lpae_iopte pte, *ptep = data->pgd;
+ int lvl = ARM_LPAE_START_LVL(data);
+
+ do {
+ /* Valid IOPTE pointer? */
+ if (!ptep)
+ return 0;
+
+ /* Grab the IOPTE we're interested in */
+ pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
+
+ /* Valid entry? */
+ if (!pte)
+ return 0;
+
+ /* Leaf entry? */
+ if (iopte_leaf(pte,lvl))
+ goto found_translation;
+
+ /* Take it to the next level */
+ ptep = iopte_deref(pte, data);
+ } while (++lvl < ARM_LPAE_MAX_LEVELS);
+
+ /* Ran out of page tables to walk */
+ return 0;
+
+found_translation:
+ iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
+ return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
+}
+
+static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
+{
+ unsigned long granule;
+
+ /*
+ * We need to restrict the supported page sizes to match the
+ * translation regime for a particular granule. Aim to match
+ * the CPU page size if possible, otherwise prefer smaller sizes.
+ * While we're at it, restrict the block sizes to match the
+ * chosen granule.
+ */
+ if (cfg->pgsize_bitmap & PAGE_SIZE)
+ granule = PAGE_SIZE;
+ else if (cfg->pgsize_bitmap & ~PAGE_MASK)
+ granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
+ else if (cfg->pgsize_bitmap & PAGE_MASK)
+ granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
+ else
+ granule = 0;
+
+ switch (granule) {
+ case SZ_4K:
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ break;
+ case SZ_16K:
+ cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
+ break;
+ case SZ_64K:
+ cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
+ break;
+ default:
+ cfg->pgsize_bitmap = 0;
+ }
+}
+
+static struct arm_lpae_io_pgtable *
+arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
+{
+ unsigned long va_bits, pgd_bits;
+ struct arm_lpae_io_pgtable *data;
+
+ arm_lpae_restrict_pgsizes(cfg);
+
+ if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
+ return NULL;
+
+ if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
+ return NULL;
+
+ if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
+ return NULL;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
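+ /*
+ * Example: a 4K granule gives pg_shift = 12 and 9 bits per level, so
+ * a 48-bit IAS resolves 36 bits of VA in 4 levels.
+ */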
+ data->pg_shift = __ffs(cfg->pgsize_bitmap);
+ data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
+
+ va_bits = cfg->ias - data->pg_shift;
+ data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+
+ /* Calculate the actual size of our pgd (without concatenation) */
+ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
+ data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map = arm_lpae_map,
+ .unmap = arm_lpae_unmap,
+ .iova_to_phys = arm_lpae_iova_to_phys,
+ };
+
+ return data;
+}
+
+static struct io_pgtable *
+arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ u64 reg;
+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+
+ if (!data)
+ return NULL;
+
+ /* TCR */
+ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= ARM_LPAE_TCR_TG0_4K;
+ break;
+ case SZ_16K:
+ reg |= ARM_LPAE_TCR_TG0_16K;
+ break;
+ case SZ_64K:
+ reg |= ARM_LPAE_TCR_TG0_64K;
+ break;
+ }
+
+ switch (cfg->oas) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
+ break;
+ default:
+ goto out_free_data;
+ }
+
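+ /* T0SZ is the number of unused top address bits: 64 - IAS */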
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+ cfg->arm_lpae_s1_cfg.tcr = reg;
+
+ /* MAIRs */
+ reg = (ARM_LPAE_MAIR_ATTR_NC
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+ (ARM_LPAE_MAIR_ATTR_WBRWA
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+ (ARM_LPAE_MAIR_ATTR_DEVICE
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
+
+ cfg->arm_lpae_s1_cfg.mair[0] = reg;
+ cfg->arm_lpae_s1_cfg.mair[1] = 0;
+
+ /* Looking good; allocate a pgd */
+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ if (!data->pgd)
+ goto out_free_data;
+
+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+
+ /* TTBRs */
+ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
+ cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
+}
+
+static struct io_pgtable *
+arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ u64 reg, sl;
+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+
+ if (!data)
+ return NULL;
+
+ /*
+ * Concatenate PGDs at level 1 if possible in order to reduce
+ * the depth of the stage-2 walk.
+ */
+ if (data->levels == ARM_LPAE_MAX_LEVELS) {
+ unsigned long pgd_pages;
+
+ pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
+ if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
+ data->pgd_size = pgd_pages << data->pg_shift;
+ data->levels--;
+ }
+ }
+
+ /* VTCR */
+ reg = ARM_64_LPAE_S2_TCR_RES1 |
+ (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+
+ sl = ARM_LPAE_START_LVL(data);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= ARM_LPAE_TCR_TG0_4K;
+ sl++; /* SL0 format is different for 4K granule size */
+ break;
+ case SZ_16K:
+ reg |= ARM_LPAE_TCR_TG0_16K;
+ break;
+ case SZ_64K:
+ reg |= ARM_LPAE_TCR_TG0_64K;
+ break;
+ }
+
+ switch (cfg->oas) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
+ break;
+ default:
+ goto out_free_data;
+ }
+
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
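+ /* SL0 encodes the start level inverted: lower values mean a deeper walk */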
+ reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
+ cfg->arm_lpae_s2_cfg.vtcr = reg;
+
+ /* Allocate pgd pages */
+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+ if (!data->pgd)
+ goto out_free_data;
+
+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+
+ /* VTTBR */
+ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+ return &data->iop;
+
+out_free_data:
+ kfree(data);
+ return NULL;
+}
+
+static struct io_pgtable *
+arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct io_pgtable *iop;
+
+ if (cfg->ias > 32 || cfg->oas > 40)
+ return NULL;
+
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
+ if (iop) {
+ cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
+ cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
+ }
+
+ return iop;
+}
+
+static struct io_pgtable *
+arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct io_pgtable *iop;
+
+ if (cfg->ias > 40 || cfg->oas > 40)
+ return NULL;
+
+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
+ iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
+ if (iop)
+ cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
+
+ return iop;
+}
+
+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
+ .alloc = arm_64_lpae_alloc_pgtable_s1,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
+ .alloc = arm_64_lpae_alloc_pgtable_s2,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
+ .alloc = arm_32_lpae_alloc_pgtable_s1,
+ .free = arm_lpae_free_pgtable,
+};
+
+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
+ .alloc = arm_32_lpae_alloc_pgtable_s2,
+ .free = arm_lpae_free_pgtable,
+};
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+ void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+ WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_sync(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+ .tlb_flush_all = dummy_tlb_flush_all,
+ .tlb_add_flush = dummy_tlb_add_flush,
+ .tlb_sync = dummy_tlb_sync,
+ .flush_pgtable = dummy_flush_pgtable,
+};
+
+static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
+{
+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+ pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
+ cfg->pgsize_bitmap, cfg->ias);
+ pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
+ data->levels, data->pgd_size, data->pg_shift,
+ data->bits_per_level, data->pgd);
+}
+
+#define __FAIL(ops, i) ({ \
+ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
+ arm_lpae_dump_ops(ops); \
+ selftest_running = false; \
+ -EFAULT; \
+})
+
+static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
+{
+ static const enum io_pgtable_fmt fmts[] = {
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ };
+
+ int i, j;
+ unsigned long iova;
+ size_t size;
+ struct io_pgtable_ops *ops;
+
+ selftest_running = true;
+
+ for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+ cfg_cookie = cfg;
+ ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
+ if (!ops) {
+ pr_err("selftest: failed to allocate io pgtable ops\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Initial sanity checks.
+ * Empty page tables shouldn't provide any translations.
+ */
+ if (ops->iova_to_phys(ops, 42))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_1G + 42))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_2G + 42))
+ return __FAIL(ops, i);
+
+ /*
+ * Distinct mappings of different granule sizes.
+ */
+ iova = 0;
+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->map(ops, iova, iova, size, IOMMU_READ |
+ IOMMU_WRITE |
+ IOMMU_NOEXEC |
+ IOMMU_CACHE))
+ return __FAIL(ops, i);
+
+ /* Overlapping mappings */
+ if (!ops->map(ops, iova, iova + size, size,
+ IOMMU_READ | IOMMU_NOEXEC))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops, i);
+
+ iova += SZ_1G;
+ j++;
+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+ }
+
+ /* Partial unmap */
+ size = 1UL << __ffs(cfg->pgsize_bitmap);
+ if (ops->unmap(ops, SZ_1G + size, size) != size)
+ return __FAIL(ops, i);
+
+ /* Remap of partial unmap */
+ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
+ return __FAIL(ops, i);
+
+ /* Full unmap */
+ iova = 0;
+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
+ while (j != BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->unmap(ops, iova, size) != size)
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42))
+ return __FAIL(ops, i);
+
+ /* Remap full block */
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ return __FAIL(ops, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops, i);
+
+ iova += SZ_1G;
+ j++;
+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
+ }
+
+ free_io_pgtable_ops(ops);
+ }
+
+ selftest_running = false;
+ return 0;
+}
+
+static int __init arm_lpae_do_selftests(void)
+{
+ static const unsigned long pgsize[] = {
+ SZ_4K | SZ_2M | SZ_1G,
+ SZ_16K | SZ_32M,
+ SZ_64K | SZ_512M,
+ };
+
+ static const unsigned int ias[] = {
+ 32, 36, 40, 42, 44, 48,
+ };
+
+ int i, j, pass = 0, fail = 0;
+ struct io_pgtable_cfg cfg = {
+ .tlb = &dummy_tlb_ops,
+ .oas = 48,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
+ for (j = 0; j < ARRAY_SIZE(ias); ++j) {
+ cfg.pgsize_bitmap = pgsize[i];
+ cfg.ias = ias[j];
+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
+ pgsize[i], ias[j]);
+ if (arm_lpae_run_tests(&cfg))
+ fail++;
+ else
+ pass++;
+ }
+ }
+
+ pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
+ return fail ? -EFAULT : 0;
+}
+subsys_initcall(arm_lpae_do_selftests);
+#endif
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
new file mode 100644
index 000000000000..6436fe24bc2f
--- /dev/null
+++ b/drivers/iommu/io-pgtable.c
@@ -0,0 +1,82 @@
+/*
+ * Generic page table allocator for IOMMUs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "io-pgtable.h"
+
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+
+static const struct io_pgtable_init_fns *
+io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
+{
+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
+ [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
+ [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
+ [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
+ [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
+#endif
+};
+
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie)
+{
+ struct io_pgtable *iop;
+ const struct io_pgtable_init_fns *fns;
+
+ if (fmt >= IO_PGTABLE_NUM_FMTS)
+ return NULL;
+
+ fns = io_pgtable_init_table[fmt];
+ if (!fns)
+ return NULL;
+
+ iop = fns->alloc(cfg, cookie);
+ if (!iop)
+ return NULL;
+
+ iop->fmt = fmt;
+ iop->cookie = cookie;
+ iop->cfg = *cfg;
+
+ return &iop->ops;
+}
+
+/*
+ * It is the IOMMU driver's responsibility to ensure that the page table
+ * is no longer accessible to the walker by this point.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops)
+{
+ struct io_pgtable *iop;
+
+ if (!ops)
+ return;
+
+ iop = container_of(ops, struct io_pgtable, ops);
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ io_pgtable_init_table[iop->fmt]->free(iop);
+}
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
new file mode 100644
index 000000000000..10e32f69c668
--- /dev/null
+++ b/drivers/iommu/io-pgtable.h
@@ -0,0 +1,143 @@
+#ifndef __IO_PGTABLE_H
+#define __IO_PGTABLE_H
+
+/*
+ * Public API for use by IOMMU drivers
+ */
+enum io_pgtable_fmt {
+ ARM_32_LPAE_S1,
+ ARM_32_LPAE_S2,
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ IO_PGTABLE_NUM_FMTS,
+};
+
+/**
+ * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ *
+ * @tlb_flush_all: Synchronously invalidate the entire TLB context.
+ * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
+ * @tlb_sync: Ensure any queued TLB invalidation has taken effect.
+ * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
+ *
+ * Note that these can all be called in atomic context and must therefore
+ * not block.
+ */
+struct iommu_gather_ops {
+ void (*tlb_flush_all)(void *cookie);
+ void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
+ void *cookie);
+ void (*tlb_sync)(void *cookie);
+ void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
+};
+
+/**
+ * struct io_pgtable_cfg - Configuration data for a set of page tables.
+ *
+ * @quirks: A bitmap of hardware quirks that require some special
+ * action by the low-level page table allocator.
+ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
+ * tables.
+ * @ias: Input address (iova) size, in bits.
+ * @oas: Output address (paddr) size, in bits.
+ * @tlb: TLB management callbacks for this set of tables.
+ */
+struct io_pgtable_cfg {
+ #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
+ int quirks;
+ unsigned long pgsize_bitmap;
+ unsigned int ias;
+ unsigned int oas;
+ const struct iommu_gather_ops *tlb;
+
+ /* Low-level data specific to the table format */
+ union {
+ struct {
+ u64 ttbr[2];
+ u64 tcr;
+ u64 mair[2];
+ } arm_lpae_s1_cfg;
+
+ struct {
+ u64 vttbr;
+ u64 vtcr;
+ } arm_lpae_s2_cfg;
+ };
+};
+
+/**
+ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
+ *
+ * @map: Map a physically contiguous memory region.
+ * @unmap: Unmap a physically contiguous memory region.
+ * @iova_to_phys: Translate iova to physical address.
+ *
+ * These functions map directly onto the iommu_ops member functions with
+ * the same names.
+ */
+struct io_pgtable_ops {
+ int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size);
+ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
+ unsigned long iova);
+};
+
+/**
+ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
+ *
+ * @fmt: The page table format.
+ * @cfg: The page table configuration. This will be modified to represent
+ * the configuration actually provided by the allocator (e.g. the
+ * pgsize_bitmap may be restricted).
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * the callback routines in cfg->tlb.
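+ *
+ * A minimal usage sketch (my_tlb_ops and my_domain are driver-specific
+ * placeholders, not part of this API):
+ *
+ *	struct io_pgtable_cfg cfg = {
+ *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
+ *		.ias		= 48,
+ *		.oas		= 48,
+ *		.tlb		= &my_tlb_ops,
+ *	};
+ *	struct io_pgtable_ops *ops;
+ *
+ *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_domain);
+ *	if (ops)
+ *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);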
+ */
+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
+ struct io_pgtable_cfg *cfg,
+ void *cookie);
+
+/**
+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
+ * *must* ensure that the page table is no longer
+ * live, but the TLB can be dirty.
+ *
+ * @ops: The ops returned from alloc_io_pgtable_ops.
+ */
+void free_io_pgtable_ops(struct io_pgtable_ops *ops);
+
+
+/*
+ * Internal structures for page table allocator implementations.
+ */
+
+/**
+ * struct io_pgtable - Internal structure describing a set of page tables.
+ *
+ * @fmt: The page table format.
+ * @cookie: An opaque token provided by the IOMMU driver and passed back to
+ * any callback routines.
+ * @cfg: A copy of the page table configuration.
+ * @ops: The page table operations in use for this set of page tables.
+ */
+struct io_pgtable {
+ enum io_pgtable_fmt fmt;
+ void *cookie;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops ops;
+};
+
+/**
+ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
+ * particular format.
+ *
+ * @alloc: Allocate a set of page tables described by cfg.
+ * @free: Free the page tables associated with iop.
+ */
+struct io_pgtable_init_fns {
+ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
+ void (*free)(struct io_pgtable *iop);
+};
+
+#endif /* __IO_PGTABLE_H */
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 748693192c20..10186cac7716 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/platform_data/ipmmu-vmsa.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -24,12 +24,13 @@
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
+#include "io-pgtable.h"
+
struct ipmmu_vmsa_device {
struct device *dev;
void __iomem *base;
struct list_head list;
- const struct ipmmu_vmsa_platform_data *pdata;
unsigned int num_utlbs;
struct dma_iommu_mapping *mapping;
@@ -39,14 +40,17 @@ struct ipmmu_vmsa_domain {
struct ipmmu_vmsa_device *mmu;
struct iommu_domain *io_domain;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops *iop;
+
unsigned int context_id;
spinlock_t lock; /* Protects mappings */
- pgd_t *pgd;
};
struct ipmmu_vmsa_archdata {
struct ipmmu_vmsa_device *mmu;
- unsigned int utlb;
+ unsigned int *utlbs;
+ unsigned int num_utlbs;
};
static DEFINE_SPINLOCK(ipmmu_devices_lock);
@@ -58,6 +62,8 @@ static LIST_HEAD(ipmmu_devices);
* Registers Definition
*/
+#define IM_NS_ALIAS_OFFSET 0x800
+
#define IM_CTX_SIZE 0x40
#define IMCTR 0x0000
@@ -171,52 +177,6 @@ static LIST_HEAD(ipmmu_devices);
#define IMUASID_ASID0_SHIFT 0
/* -----------------------------------------------------------------------------
- * Page Table Bits
- */
-
-/*
- * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access,
- * Long-descriptor format" that the NStable bit being set in a table descriptor
- * will result in the NStable and NS bits of all child entries being ignored and
- * considered as being set. The IPMMU seems not to comply with this, as it
- * generates a secure access page fault if any of the NStable and NS bits isn't
- * set when running in non-secure mode.
- */
-#ifndef PMD_NSTABLE
-#define PMD_NSTABLE (_AT(pmdval_t, 1) << 63)
-#endif
-
-#define ARM_VMSA_PTE_XN (((pteval_t)3) << 53)
-#define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52)
-#define ARM_VMSA_PTE_AF (((pteval_t)1) << 10)
-#define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8)
-#define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8)
-#define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8)
-#define ARM_VMSA_PTE_SH_MASK (((pteval_t)3) << 8)
-#define ARM_VMSA_PTE_NS (((pteval_t)1) << 5)
-#define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0)
-
-/* Stage-1 PTE */
-#define ARM_VMSA_PTE_nG (((pteval_t)1) << 11)
-#define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6)
-#define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6)
-#define ARM_VMSA_PTE_AP_MASK (((pteval_t)3) << 6)
-#define ARM_VMSA_PTE_ATTRINDX_MASK (((pteval_t)3) << 2)
-#define ARM_VMSA_PTE_ATTRINDX_SHIFT 2
-
-#define ARM_VMSA_PTE_ATTRS_MASK \
- (ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \
- ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \
- ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK)
-
-#define ARM_VMSA_PTE_CONT_ENTRIES 16
-#define ARM_VMSA_PTE_CONT_SIZE (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES)
-
-#define IPMMU_PTRS_PER_PTE 512
-#define IPMMU_PTRS_PER_PMD 512
-#define IPMMU_PTRS_PER_PGD 4
-
-/* -----------------------------------------------------------------------------
* Read/Write Access
*/
@@ -305,18 +265,39 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
ipmmu_write(mmu, IMUCTR(utlb), 0);
}
-static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
- size_t size)
+static void ipmmu_tlb_flush_all(void *cookie)
+{
+ struct ipmmu_vmsa_domain *domain = cookie;
+
+ ipmmu_tlb_invalidate(domain);
+}
+
+static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
+ void *cookie)
{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+ /* The hardware doesn't support selective TLB flush. */
+}
+
+static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
+{
+ unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+ struct ipmmu_vmsa_domain *domain = cookie;
/*
* TODO: Add support for coherent walk through CCI with DVM and remove
* cache handling.
*/
- dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE);
+ dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
+ DMA_TO_DEVICE);
}
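+/*
+ * With no selective invalidation available, tlb_add_flush is a no-op and
+ * tlb_sync performs a full TLB invalidate instead.
+ */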
+static struct iommu_gather_ops ipmmu_gather_ops = {
+ .tlb_flush_all = ipmmu_tlb_flush_all,
+ .tlb_add_flush = ipmmu_tlb_add_flush,
+ .tlb_sync = ipmmu_tlb_flush_all,
+ .flush_pgtable = ipmmu_flush_pgtable,
+};
+
/* -----------------------------------------------------------------------------
* Domain/Context Management
*/
@@ -324,7 +305,28 @@ static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
phys_addr_t ttbr;
- u32 reg;
+
+ /*
+ * Allocate the page table operations.
+ *
+ * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
+ * access, Long-descriptor format" that the NStable bit being set in a
+ * table descriptor will result in the NStable and NS bits of all child
+ * entries being ignored and considered as being set. The IPMMU seems
+ * not to comply with this, as it generates a secure access page fault
+ * if any of the NStable and NS bits isn't set when running in
+ * non-secure mode.
+ */
+ domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
+ domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+ domain->cfg.ias = 32;
+ domain->cfg.oas = 40;
+ domain->cfg.tlb = &ipmmu_gather_ops;
+
+ domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+ domain);
+ if (!domain->iop)
+ return -EINVAL;
/*
* TODO: When adding support for multiple contexts, find an unused
@@ -333,9 +335,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->context_id = 0;
/* TTBR0 */
- ipmmu_flush_pgtable(domain->mmu, domain->pgd,
- IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd));
- ttbr = __pa(domain->pgd);
+ ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
@@ -348,15 +348,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
- /*
- * MAIR0
- * We need three attributes only, non-cacheable, write-back read/write
- * allocate and device memory.
- */
- reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC))
- | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA))
- | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV));
- ipmmu_ctx_write(domain, IMMAIR0, reg);
+ /* MAIR0 */
+ ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
/* IMBUSCR */
ipmmu_ctx_write(domain, IMBUSCR,
@@ -461,396 +454,6 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
}
/* -----------------------------------------------------------------------------
- * Page Table Management
- */
-
-#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
-
-static void ipmmu_free_ptes(pmd_t *pmd)
-{
- pgtable_t table = pmd_pgtable(*pmd);
- __free_page(table);
-}
-
-static void ipmmu_free_pmds(pud_t *pud)
-{
- pmd_t *pmd = pmd_offset(pud, 0);
- pgtable_t table;
- unsigned int i;
-
- for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
- if (!pmd_table(*pmd))
- continue;
-
- ipmmu_free_ptes(pmd);
- pmd++;
- }
-
- table = pud_pgtable(*pud);
- __free_page(table);
-}
-
-static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
-{
- pgd_t *pgd, *pgd_base = domain->pgd;
- unsigned int i;
-
- /*
- * Recursively free the page tables for this domain. We don't care about
- * speculative TLB filling, because the TLB will be nuked next time this
- * context bank is re-allocated and no devices currently map to these
- * tables.
- */
- pgd = pgd_base;
- for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) {
- if (pgd_none(*pgd))
- continue;
- ipmmu_free_pmds((pud_t *)pgd);
- pgd++;
- }
-
- kfree(pgd_base);
-}
-
-/*
- * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte)
- * functions as they would flush the CPU TLB.
- */
-
-static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
- unsigned long iova)
-{
- pte_t *pte;
-
- if (!pmd_none(*pmd))
- return pte_offset_kernel(pmd, iova);
-
- pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pte)
- return NULL;
-
- ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
- *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- return pte + pte_index(iova);
-}
-
-static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
- unsigned long iova)
-{
- pud_t *pud = (pud_t *)pgd;
- pmd_t *pmd;
-
- if (!pud_none(*pud))
- return pmd_offset(pud, iova);
-
- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pmd)
- return NULL;
-
- ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
- *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
- ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
-
- return pmd + pmd_index(iova);
-}
-
-static u64 ipmmu_page_prot(unsigned int prot, u64 type)
-{
- u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
- | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
- | ARM_VMSA_PTE_NS | type;
-
- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
- pgprot |= ARM_VMSA_PTE_AP_RDONLY;
-
- if (prot & IOMMU_CACHE)
- pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
-
- if (prot & IOMMU_NOEXEC)
- pgprot |= ARM_VMSA_PTE_XN;
- else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
- /* If no access create a faulting entry to avoid TLB fills. */
- pgprot &= ~ARM_VMSA_PTE_PAGE;
-
- return pgprot;
-}
-
-static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
- unsigned long iova, unsigned long pfn,
- size_t size, int prot)
-{
- pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
- unsigned int num_ptes = 1;
- pte_t *pte, *start;
- unsigned int i;
-
- pte = ipmmu_alloc_pte(mmu, pmd, iova);
- if (!pte)
- return -ENOMEM;
-
- start = pte;
-
- /*
- * Install the page table entries. We can be called both for a single
- * page or for a block of 16 physically contiguous pages. In the latter
- * case set the PTE contiguous hint.
- */
- if (size == SZ_64K) {
- pteval |= ARM_VMSA_PTE_CONT;
- num_ptes = ARM_VMSA_PTE_CONT_ENTRIES;
- }
-
- for (i = num_ptes; i; --i)
- *pte++ = pfn_pte(pfn++, __pgprot(pteval));
-
- ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes);
-
- return 0;
-}
-
-static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
- unsigned long iova, unsigned long pfn,
- int prot)
-{
- pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT);
-
- *pmd = pfn_pmd(pfn, __pgprot(pmdval));
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- return 0;
-}
-
-static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int prot)
-{
- struct ipmmu_vmsa_device *mmu = domain->mmu;
- pgd_t *pgd = domain->pgd;
- unsigned long flags;
- unsigned long pfn;
- pmd_t *pmd;
- int ret;
-
- if (!pgd)
- return -EINVAL;
-
- if (size & ~PAGE_MASK)
- return -EINVAL;
-
- if (paddr & ~((1ULL << 40) - 1))
- return -ERANGE;
-
- pfn = __phys_to_pfn(paddr);
- pgd += pgd_index(iova);
-
- /* Update the page tables. */
- spin_lock_irqsave(&domain->lock, flags);
-
- pmd = ipmmu_alloc_pmd(mmu, pgd, iova);
- if (!pmd) {
- ret = -ENOMEM;
- goto done;
- }
-
- switch (size) {
- case SZ_2M:
- ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot);
- break;
- case SZ_64K:
- case SZ_4K:
- ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
-done:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- if (!ret)
- ipmmu_tlb_invalidate(domain);
-
- return ret;
-}
-
-static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud)
-{
- /* Free the page table. */
- pgtable_t table = pud_pgtable(*pud);
- __free_page(table);
-
- /* Clear the PUD. */
- *pud = __pud(0);
- ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
-}
-
-static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
- pmd_t *pmd)
-{
- unsigned int i;
-
- /* Free the page table. */
- if (pmd_table(*pmd)) {
- pgtable_t table = pmd_pgtable(*pmd);
- __free_page(table);
- }
-
- /* Clear the PMD. */
- *pmd = __pmd(0);
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- /* Check whether the PUD is still needed. */
- pmd = pmd_offset(pud, 0);
- for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
- if (!pmd_none(pmd[i]))
- return;
- }
-
- /* Clear the parent PUD. */
- ipmmu_clear_pud(mmu, pud);
-}
-
-static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud,
- pmd_t *pmd, pte_t *pte, unsigned int num_ptes)
-{
- unsigned int i;
-
- /* Clear the PTE. */
- for (i = num_ptes; i; --i)
- pte[i-1] = __pte(0);
-
- ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes);
-
- /* Check whether the PMD is still needed. */
- pte = pte_offset_kernel(pmd, 0);
- for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) {
- if (!pte_none(pte[i]))
- return;
- }
-
- /* Clear the parent PMD. */
- ipmmu_clear_pmd(mmu, pud, pmd);
-}
-
-static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd)
-{
- pte_t *pte, *start;
- pteval_t pteval;
- unsigned long pfn;
- unsigned int i;
-
- pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
- if (!pte)
- return -ENOMEM;
-
- /* Copy the PMD attributes. */
- pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK)
- | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE;
-
- pfn = pmd_pfn(*pmd);
- start = pte;
-
- for (i = IPMMU_PTRS_PER_PTE; i; --i)
- *pte++ = pfn_pte(pfn++, __pgprot(pteval));
-
- ipmmu_flush_pgtable(mmu, start, PAGE_SIZE);
- *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE);
- ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
-
- return 0;
-}
-
-static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte)
-{
- unsigned int i;
-
- for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i)
- pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT);
-
- ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES);
-}
-
-static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain,
- unsigned long iova, size_t size)
-{
- struct ipmmu_vmsa_device *mmu = domain->mmu;
- unsigned long flags;
- pgd_t *pgd = domain->pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int ret = 0;
-
- if (!pgd)
- return -EINVAL;
-
- if (size & ~PAGE_MASK)
- return -EINVAL;
-
- pgd += pgd_index(iova);
- pud = (pud_t *)pgd;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- /* If there's no PUD or PMD we're done. */
- if (pud_none(*pud))
- goto done;
-
- pmd = pmd_offset(pud, iova);
- if (pmd_none(*pmd))
- goto done;
-
- /*
- * When freeing a 2MB block just clear the PMD. In the unlikely case the
- * block is mapped as individual pages this will free the corresponding
- * PTE page table.
- */
- if (size == SZ_2M) {
- ipmmu_clear_pmd(mmu, pud, pmd);
- goto done;
- }
-
- /*
- * If the PMD has been mapped as a section remap it as pages to allow
- * freeing individual pages.
- */
- if (pmd_sect(*pmd))
- ipmmu_split_pmd(mmu, pmd);
-
- pte = pte_offset_kernel(pmd, iova);
-
- /*
- * When freeing a 64kB block just clear the PTE entries. We don't have
- * to care about the contiguous hint of the surrounding entries.
- */
- if (size == SZ_64K) {
- ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES);
- goto done;
- }
-
- /*
- * If the PTE has been mapped with the contiguous hint set remap it and
- * its surrounding PTEs to allow unmapping a single page.
- */
- if (pte_val(*pte) & ARM_VMSA_PTE_CONT)
- ipmmu_split_pte(mmu, pte);
-
- /* Clear the PTE. */
- ipmmu_clear_pte(mmu, pud, pmd, pte, 1);
-
-done:
- spin_unlock_irqrestore(&domain->lock, flags);
-
- if (ret)
- ipmmu_tlb_invalidate(domain);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
* IOMMU Operations
*/
@@ -864,12 +467,6 @@ static int ipmmu_domain_init(struct iommu_domain *io_domain)
spin_lock_init(&domain->lock);
- domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
- if (!domain->pgd) {
- kfree(domain);
- return -ENOMEM;
- }
-
io_domain->priv = domain;
domain->io_domain = io_domain;
@@ -885,7 +482,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
* been detached.
*/
ipmmu_domain_destroy_context(domain);
- ipmmu_free_pgtables(domain);
+ free_io_pgtable_ops(domain->iop);
kfree(domain);
}
@@ -896,6 +493,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct ipmmu_vmsa_device *mmu = archdata->mmu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
unsigned long flags;
+ unsigned int i;
int ret = 0;
if (!mmu) {
@@ -924,7 +522,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
if (ret < 0)
return ret;
- ipmmu_utlb_enable(domain, archdata->utlb);
+ for (i = 0; i < archdata->num_utlbs; ++i)
+ ipmmu_utlb_enable(domain, archdata->utlbs[i]);
return 0;
}
@@ -934,8 +533,10 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
+ unsigned int i;
- ipmmu_utlb_disable(domain, archdata->utlb);
+ for (i = 0; i < archdata->num_utlbs; ++i)
+ ipmmu_utlb_disable(domain, archdata->utlbs[i]);
/*
* TODO: Optimize by disabling the context when no device is attached.
@@ -950,76 +551,61 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
if (!domain)
return -ENODEV;
- return ipmmu_create_mapping(domain, iova, paddr, size, prot);
+ return domain->iop->map(domain->iop, iova, paddr, size, prot);
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
size_t size)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
- int ret;
- ret = ipmmu_clear_mapping(domain, iova, size);
- return ret ? 0 : size;
+ return domain->iop->unmap(domain->iop, iova, size);
}
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
- pgd_t pgd;
- pud_t pud;
- pmd_t pmd;
- pte_t pte;
/* TODO: Is locking needed ? */
- if (!domain->pgd)
- return 0;
-
- pgd = *(domain->pgd + pgd_index(iova));
- if (pgd_none(pgd))
- return 0;
-
- pud = *pud_offset(&pgd, iova);
- if (pud_none(pud))
- return 0;
+ return domain->iop->iova_to_phys(domain->iop, iova);
+}
- pmd = *pmd_offset(&pud, iova);
- if (pmd_none(pmd))
- return 0;
+static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
+ unsigned int *utlbs, unsigned int num_utlbs)
+{
+ unsigned int i;
- if (pmd_sect(pmd))
- return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK);
+ for (i = 0; i < num_utlbs; ++i) {
+ struct of_phandle_args args;
+ int ret;
- pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
- if (pte_none(pte))
- return 0;
+ ret = of_parse_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells", i, &args);
+ if (ret < 0)
+ return ret;
- return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
-}
+ of_node_put(args.np);
-static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev)
-{
- const struct ipmmu_vmsa_master *master = mmu->pdata->masters;
- const char *devname = dev_name(dev);
- unsigned int i;
+ if (args.np != mmu->dev->of_node || args.args_count != 1)
+ return -EINVAL;
- for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) {
- if (strcmp(master->name, devname) == 0)
- return master->utlb;
+ utlbs[i] = args.args[0];
}
- return -1;
+ return 0;
}
static int ipmmu_add_device(struct device *dev)
{
struct ipmmu_vmsa_archdata *archdata;
struct ipmmu_vmsa_device *mmu;
- struct iommu_group *group;
- int utlb = -1;
- int ret;
+ struct iommu_group *group = NULL;
+ unsigned int *utlbs;
+ unsigned int i;
+ int num_utlbs;
+ int ret = -ENODEV;
if (dev->archdata.iommu) {
dev_warn(dev, "IOMMU driver already assigned to device %s\n",
@@ -1028,11 +614,21 @@ static int ipmmu_add_device(struct device *dev)
}
/* Find the master corresponding to the device. */
+
+ num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
+ "#iommu-cells");
+ if (num_utlbs < 0)
+ return -ENODEV;
+
+ utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
+ if (!utlbs)
+ return -ENOMEM;
+
spin_lock(&ipmmu_devices_lock);
list_for_each_entry(mmu, &ipmmu_devices, list) {
- utlb = ipmmu_find_utlb(mmu, dev);
- if (utlb >= 0) {
+ ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
+ if (!ret) {
/*
* TODO Take a reference to the MMU to protect
* against device removal.
@@ -1043,17 +639,22 @@ static int ipmmu_add_device(struct device *dev)
spin_unlock(&ipmmu_devices_lock);
- if (utlb < 0)
+ if (ret < 0)
return -ENODEV;
- if (utlb >= mmu->num_utlbs)
- return -EINVAL;
+ for (i = 0; i < num_utlbs; ++i) {
+ if (utlbs[i] >= mmu->num_utlbs) {
+ ret = -EINVAL;
+ goto error;
+ }
+ }
/* Create a device group and add the device to it. */
group = iommu_group_alloc();
if (IS_ERR(group)) {
dev_err(dev, "Failed to allocate IOMMU group\n");
- return PTR_ERR(group);
+ ret = PTR_ERR(group);
+ goto error;
}
ret = iommu_group_add_device(group, dev);
@@ -1061,7 +662,8 @@ static int ipmmu_add_device(struct device *dev)
if (ret < 0) {
dev_err(dev, "Failed to add device to IPMMU group\n");
- return ret;
+ group = NULL;
+ goto error;
}
archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
@@ -1071,7 +673,8 @@ static int ipmmu_add_device(struct device *dev)
}
archdata->mmu = mmu;
- archdata->utlb = utlb;
+ archdata->utlbs = utlbs;
+ archdata->num_utlbs = num_utlbs;
dev->archdata.iommu = archdata;
/*
@@ -1090,7 +693,8 @@ static int ipmmu_add_device(struct device *dev)
SZ_1G, SZ_2G);
if (IS_ERR(mapping)) {
dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
- return PTR_ERR(mapping);
+ ret = PTR_ERR(mapping);
+ goto error;
}
mmu->mapping = mapping;
@@ -1106,17 +710,29 @@ static int ipmmu_add_device(struct device *dev)
return 0;
error:
+ arm_iommu_release_mapping(mmu->mapping);
+
kfree(dev->archdata.iommu);
+ kfree(utlbs);
+
dev->archdata.iommu = NULL;
- iommu_group_remove_device(dev);
+
+ if (!IS_ERR_OR_NULL(group))
+ iommu_group_remove_device(dev);
+
return ret;
}
static void ipmmu_remove_device(struct device *dev)
{
+ struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
+
arm_iommu_detach_device(dev);
iommu_group_remove_device(dev);
- kfree(dev->archdata.iommu);
+
+ kfree(archdata->utlbs);
+ kfree(archdata);
+
dev->archdata.iommu = NULL;
}
@@ -1131,7 +747,7 @@ static const struct iommu_ops ipmmu_ops = {
.iova_to_phys = ipmmu_iova_to_phys,
.add_device = ipmmu_add_device,
.remove_device = ipmmu_remove_device,
- .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K,
+ .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};
/* -----------------------------------------------------------------------------
@@ -1154,7 +770,7 @@ static int ipmmu_probe(struct platform_device *pdev)
int irq;
int ret;
- if (!pdev->dev.platform_data) {
+ if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) {
dev_err(&pdev->dev, "missing platform data\n");
return -EINVAL;
}
@@ -1166,7 +782,6 @@ static int ipmmu_probe(struct platform_device *pdev)
}
mmu->dev = &pdev->dev;
- mmu->pdata = pdev->dev.platform_data;
mmu->num_utlbs = 32;
/* Map I/O memory and request IRQ. */
@@ -1175,6 +790,20 @@ static int ipmmu_probe(struct platform_device *pdev)
if (IS_ERR(mmu->base))
return PTR_ERR(mmu->base);
+ /*
+ * The IPMMU has two register banks, for secure and non-secure modes.
+ * The bank mapped at the beginning of the IPMMU address space
+ * corresponds to the running mode of the CPU. When running in secure
+ * mode the non-secure register bank is also available at an offset.
+ *
+ * Secure mode operation isn't clearly documented and is thus currently
+ * not implemented in the driver. Furthermore, preliminary tests of
+ * non-secure operation with the main register bank were not successful.
+ * Offset the registers base unconditionally to point to the non-secure
+ * alias space for now.
+ */
+ mmu->base += IM_NS_ALIAS_OFFSET;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ found\n");
@@ -1220,9 +849,14 @@ static int ipmmu_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ipmmu_of_ids[] = {
+ { .compatible = "renesas,ipmmu-vmsa", },
+ { /* sentinel */ },
+};
+
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
+ .of_match_table = of_match_ptr(ipmmu_of_ids),
},
.probe = ipmmu_probe,
.remove = ipmmu_remove,
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index bbb7dcef02d3..f59f857b702e 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1126,7 +1126,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
return -EINVAL;
}
- dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
+ dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
iotlb_init_entry(&e, da, pa, omap_pgsz);
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index f722a0c466cf..c48da057dbb1 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
+ .map_sg = default_iommu_map_sg,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
do_gart_setup(gart, NULL);
gart_handle = gart;
- bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
return 0;
}
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index d111ac779c40..63cd031b2c28 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -28,7 +28,7 @@
#define AT91_AIC_IRQ_MIN_PRIORITY 0
#define AT91_AIC_IRQ_MAX_PRIORITY 7
-#define AT91_AIC_SRCTYPE GENMASK(7, 6)
+#define AT91_AIC_SRCTYPE GENMASK(6, 5)
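+/* The source type field occupies bits [6:5], matching the (x << 5) values below */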
#define AT91_AIC_SRCTYPE_LOW (0 << 5)
#define AT91_AIC_SRCTYPE_FALLING (1 << 5)
#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
@@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
return -EINVAL;
}
- *val &= AT91_AIC_SRCTYPE;
+ *val &= ~AT91_AIC_SRCTYPE;
*val |= aic_type;
return 0;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 86e4684adeb1..d8996bdf0f61 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
* of two entries. No, the architecture doesn't let you
* express an ITT with a single entry.
*/
- nr_ites = max(2, roundup_pow_of_two(nvecs));
+ nr_ites = max(2UL, roundup_pow_of_two(nvecs));
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kmalloc(sz, GFP_KERNEL);
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 29b8f21b74d0..6bc2deb73d53 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -381,7 +381,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
* It will be refined as each CPU probes its ID.
*/
for (i = 0; i < NR_HIP04_CPU_IF; i++)
- hip04_cpu_map[i] = 0xff;
+ hip04_cpu_map[i] = 0xffff;
/*
* Find out how many interrupts are supported.
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 7e342df6a62f..0b0d2c00a2df 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -137,9 +137,9 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
return -ENOMEM;
chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
- if (!chip_data->intpol_base) {
+ if (IS_ERR(chip_data->intpol_base)) {
pr_err("mtk_sysirq: unable to map sysirq register\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(chip_data->intpol_base);
goto out_free;
}
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index 28718d3e8281..c03f140acbae 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node)
return ret;
}
-static int __init omap_init_irq_legacy(u32 base)
+static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
int j, irq_base;
@@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base)
irq_base = 0;
}
- domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0,
+ domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
&irq_domain_simple_ops, NULL);
omap_irq_soft_reset();
@@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
{
int ret;
- if (node)
+ /*
+ * FIXME: the legacy OMAP DMA driver sitting under
+ * arch/arm/plat-omap/dma.c is still not ready for linear IRQ
+ * domains; because of that we need to temporarily "blacklist" OMAP2
+ * and OMAP3 devices from using a linear IRQ domain until that driver
+ * is finally fixed.
+ */
+ if (of_device_is_compatible(node, "ti,omap2-intc") ||
+ of_device_is_compatible(node, "ti,omap3-intc")) {
+ struct resource res;
+
+ if (of_address_to_resource(node, 0, &res))
+ return -ENOMEM;
+
+ base = res.start;
+ ret = omap_init_irq_legacy(base, node);
+ } else if (node) {
ret = omap_init_irq_of(node);
- else
- ret = omap_init_irq_legacy(base);
+ } else {
+ ret = omap_init_irq_legacy(base, NULL);
+ }
if (ret == 0)
omap_irq_enable_protection();
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 9fc616c2755e..c1c010498a21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -94,6 +94,9 @@ struct cache_disk_superblock {
} __packed;
struct dm_cache_metadata {
+ atomic_t ref_count;
+ struct list_head list;
+
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
/*----------------------------------------------------------------*/
-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
- sector_t data_block_size,
- bool may_format_device,
- size_t policy_hint_size)
+static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
{
int r;
struct dm_cache_metadata *cmd;
@@ -680,9 +683,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
DMERR("could not allocate metadata struct");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ atomic_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
cmd->data_block_size = data_block_size;
@@ -705,10 +709,96 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
return cmd;
}
+/*
+ * We keep a small list of ref-counted metadata objects to prevent two
+ * different target instances from creating separate bufio instances. This
+ * is an issue if a table is reloaded before the suspend.
+ */
+static DEFINE_MUTEX(table_lock);
+static LIST_HEAD(table);
+
+static struct dm_cache_metadata *lookup(struct block_device *bdev)
+{
+ struct dm_cache_metadata *cmd;
+
+ list_for_each_entry(cmd, &table, list)
+ if (cmd->bdev == bdev) {
+ atomic_inc(&cmd->ref_count);
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ struct dm_cache_metadata *cmd, *cmd2;
+
+ mutex_lock(&table_lock);
+ cmd = lookup(bdev);
+ mutex_unlock(&table_lock);
+
+ if (cmd)
+ return cmd;
+
+ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+ if (!IS_ERR(cmd)) {
+ mutex_lock(&table_lock);
+ cmd2 = lookup(bdev);
+ if (cmd2) {
+ mutex_unlock(&table_lock);
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+ return cmd2;
+ }
+ list_add(&cmd->list, &table);
+ mutex_unlock(&table_lock);
+ }
+
+ return cmd;
+}
+
+static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
+{
+ if (cmd->data_block_size != data_block_size) {
+ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+ (unsigned long long) data_block_size,
+ (unsigned long long) cmd->data_block_size);
+ return false;
+ }
+
+ return true;
+}
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+ may_format_device, policy_hint_size);
+
+ if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
+ dm_cache_metadata_close(cmd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cmd;
+}
+
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
- __destroy_persistent_data_objects(cmd);
- kfree(cmd);
+ if (atomic_dec_and_test(&cmd->ref_count)) {
+ mutex_lock(&table_lock);
+ list_del(&cmd->list);
+ mutex_unlock(&table_lock);
+
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+ }
}
/*
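
The new open path above is a double-checked lookup: search the table under the lock, do the expensive open with the lock dropped, then re-check and discard the local copy if another table load raced in. A compilable sketch with the dm specifics stripped out (plain ints instead of atomics, calloc failure handling elided):

#include <pthread.h>
#include <stdlib.h>

struct cmd { void *bdev; int ref; struct cmd *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *table;

static struct cmd *lookup(void *bdev)   /* caller holds table_lock */
{
    for (struct cmd *c = table; c; c = c->next)
        if (c->bdev == bdev) { c->ref++; return c; }
    return NULL;
}

static struct cmd *lookup_or_open(void *bdev)
{
    pthread_mutex_lock(&table_lock);
    struct cmd *c = lookup(bdev);
    pthread_mutex_unlock(&table_lock);
    if (c)
        return c;

    c = calloc(1, sizeof(*c));          /* the "slow" open, lock dropped */
    c->bdev = bdev;
    c->ref = 1;

    pthread_mutex_lock(&table_lock);
    struct cmd *c2 = lookup(bdev);
    if (c2) {                           /* lost the race: drop our copy */
        pthread_mutex_unlock(&table_lock);
        free(c);
        return c2;
    }
    c->next = table;
    table = c;
    pthread_mutex_unlock(&table_lock);
    return c;
}

int main(void)
{
    void *bdev = (void *)0x1;
    struct cmd *a = lookup_or_open(bdev);
    struct cmd *b = lookup_or_open(bdev);   /* same object, ref == 2 */
    return !(a == b && a->ref == 2);
}
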
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1e96d7889f51..e1650539cc2f 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -221,7 +221,13 @@ struct cache {
struct list_head need_commit_migrations;
sector_t migration_threshold;
wait_queue_head_t migration_wait;
- atomic_t nr_migrations;
+ atomic_t nr_allocated_migrations;
+
+ /*
+ * The number of in-flight migrations that are performing
+ * background I/O, e.g. promotion or writeback.
+ */
+ atomic_t nr_io_migrations;
wait_queue_head_t quiescing_wait;
atomic_t quiescing;
@@ -258,7 +264,6 @@ struct cache {
struct dm_deferred_set *all_io_ds;
mempool_t *migration_pool;
- struct dm_cache_migration *next_migration;
struct dm_cache_policy *policy;
unsigned policy_nr_args;
@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
dm_bio_prison_free_cell(cache->prison, cell);
}
+static struct dm_cache_migration *alloc_migration(struct cache *cache)
+{
+ struct dm_cache_migration *mg;
+
+ mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ if (mg) {
+ mg->cache = cache;
+ atomic_inc(&mg->cache->nr_allocated_migrations);
+ }
+
+ return mg;
+}
+
+static void free_migration(struct dm_cache_migration *mg)
+{
+ if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
+ wake_up(&mg->cache->migration_wait);
+
+ mempool_free(mg, mg->cache->migration_pool);
+}
+
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
if (!p->mg) {
- p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ p->mg = alloc_migration(cache);
if (!p->mg)
return -ENOMEM;
}
@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
free_prison_cell(cache, p->cell1);
if (p->mg)
- mempool_free(p->mg, cache->migration_pool);
+ free_migration(p->mg);
}
static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
@@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
* Migration covers moving data from the origin device to the cache, or
* vice versa.
*--------------------------------------------------------------*/
-static void free_migration(struct dm_cache_migration *mg)
-{
- mempool_free(mg, mg->cache->migration_pool);
-}
-
-static void inc_nr_migrations(struct cache *cache)
+static void inc_io_migrations(struct cache *cache)
{
- atomic_inc(&cache->nr_migrations);
+ atomic_inc(&cache->nr_io_migrations);
}
-static void dec_nr_migrations(struct cache *cache)
+static void dec_io_migrations(struct cache *cache)
{
- atomic_dec(&cache->nr_migrations);
-
- /*
- * Wake the worker in case we're suspending the target.
- */
- wake_up(&cache->migration_wait);
+ atomic_dec(&cache->nr_io_migrations);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
wake_worker(cache);
}
-static void cleanup_migration(struct dm_cache_migration *mg)
+static void free_io_migration(struct dm_cache_migration *mg)
{
- struct cache *cache = mg->cache;
+ dec_io_migrations(mg->cache);
free_migration(mg);
- dec_nr_migrations(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
@@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg)
cell_defer(cache, mg->new_ocell, true);
}
- cleanup_migration(mg);
+ free_io_migration(mg);
}
static void migration_success_pre_commit(struct dm_cache_migration *mg)
@@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
if (mg->writeback) {
clear_dirty(cache, mg->old_oblock, mg->cblock);
cell_defer(cache, mg->old_ocell, false);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
} else if (mg->demote) {
@@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
mg->old_oblock);
if (mg->promote)
cell_defer(cache, mg->new_ocell, true);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
}
} else {
if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
policy_remove_mapping(cache->policy, mg->new_oblock);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
}
}
@@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
} else {
if (mg->invalidate)
policy_remove_mapping(cache->policy, mg->old_oblock);
- cleanup_migration(mg);
+ free_io_migration(mg);
}
} else {
@@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
bio_endio(mg->new_ocell->holder, 0);
cell_defer(cache, mg->new_ocell, false);
}
- cleanup_migration(mg);
+ free_io_migration(mg);
}
}
@@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
mg->new_ocell = cell;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
mg->new_ocell = NULL;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
mg->new_ocell = new_ocell;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
mg->new_ocell = NULL;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
static bool spare_migration_bandwidth(struct cache *cache)
{
- sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+ sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
cache->sectors_per_block;
return current_volume < cache->migration_threshold;
}
@@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache)
static void wait_for_migrations(struct cache *cache)
{
- wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+ wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
}
static void stop_worker(struct cache *cache)
@@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache)
{
unsigned i;
- if (cache->next_migration)
- mempool_free(cache->next_migration, cache->migration_pool);
-
if (cache->migration_pool)
mempool_destroy(cache->migration_pool);
@@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);
- atomic_set(&cache->nr_migrations, 0);
+ atomic_set(&cache->nr_allocated_migrations, 0);
+ atomic_set(&cache->nr_io_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
init_waitqueue_head(&cache->quiescing_wait);
@@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- cache->next_migration = NULL;
-
cache->need_tick_bio = true;
cache->sized = false;
cache->invalidate = false;
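
The rename-and-split above separates two jobs the old nr_migrations counter conflated: suspend must wait for every allocated migration (including preallocated ones parked in a struct prealloc), while the bandwidth throttle should only count migrations actually doing background I/O. A toy model of the two checks (plain ints stand in for the atomics):

#include <assert.h>

struct cache_model {
    int nr_allocated_migrations;    /* lifetime: alloc .. free */
    int nr_io_migrations;           /* subset doing background I/O */
};

static int quiesced(struct cache_model *c)
{
    /* what wait_for_migrations() now checks */
    return c->nr_allocated_migrations == 0;
}

static int spare_bandwidth(struct cache_model *c, int threshold)
{
    /* what the throttle checks: only in-flight I/O counts */
    return c->nr_io_migrations + 1 < threshold;
}

int main(void)
{
    struct cache_model c = { .nr_allocated_migrations = 2,
                             .nr_io_migrations = 0 };
    /* preallocated-but-idle migrations no longer throttle I/O ... */
    assert(spare_bandwidth(&c, 2));
    /* ... but they do keep the target from finishing suspend */
    assert(!quiesced(&c));
    return 0;
}
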
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 493478989dbd..07705ee181e3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
+ if (get_pool_mode(pool) >= PM_READ_ONLY) {
+ DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+ dm_device_name(pool->pool_md));
+ return -EINVAL;
+ }
+
if (!strcasecmp(argv[0], "create_thin"))
r = process_create_thin_mesg(argc, argv, pool);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b98cd9d84435..2caf5b374649 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -206,6 +206,9 @@ struct mapped_device {
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
+ /* the number of internal suspends */
+ unsigned internal_suspend_count;
+
struct dm_stats stats;
};
@@ -2928,7 +2931,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
{
struct dm_table *map = NULL;
- if (dm_suspended_internally_md(md))
+ if (md->internal_suspend_count++)
return; /* nested internal suspend */
if (dm_suspended_md(md)) {
@@ -2953,7 +2956,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
static void __dm_internal_resume(struct mapped_device *md)
{
- if (!dm_suspended_internally_md(md))
+ BUG_ON(!md->internal_suspend_count);
+
+ if (--md->internal_suspend_count)
return; /* resume from nested internal suspend */
if (dm_suspended_md(md))
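
Replacing the boolean dm_suspended_internally_md() test with a counter makes internal suspend/resume properly nestable: only the outermost suspend and the matching final resume do real work, and the BUG_ON catches unbalanced resumes. A toy model of the counting, with the real work reduced to a flag:

#include <assert.h>

struct md { unsigned internal_suspend_count; int suspended; };

static void internal_suspend(struct md *md)
{
    if (md->internal_suspend_count++)
        return;                 /* nested: already suspended */
    md->suspended = 1;          /* stands in for the real work */
}

static void internal_resume(struct md *md)
{
    assert(md->internal_suspend_count);     /* the BUG_ON */
    if (--md->internal_suspend_count)
        return;                 /* still nested */
    md->suspended = 0;
}

int main(void)
{
    struct md md = { 0, 0 };
    internal_suspend(&md);      /* does the work */
    internal_suspend(&md);      /* nested, no-op */
    internal_resume(&md);       /* still suspended */
    assert(md.suspended == 1);
    internal_resume(&md);       /* outermost resume */
    assert(md.suspended == 0);
    return 0;
}
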
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index db99ca2613ba..06931f6fa26c 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = {
.portb = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR4400] = {
- .name = "Hauppauge WinTV-HVR4400",
+ .name = "Hauppauge WinTV-HVR4400/HVR5500",
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
@@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = {
.tuner_addr = 0x60, /* 0xc0 >> 1 */
.tuner_bus = 1,
},
+ [CX23885_BOARD_HAUPPAUGE_STARBURST] = {
+ .name = "Hauppauge WinTV Starburst",
+ .portb = CX23885_MPEG_DVB,
+ },
[CX23885_BOARD_AVERMEDIA_HC81R] = {
.name = "AVerTV Hybrid Express Slim HC81R",
.tuner_type = TUNER_XC2028,
@@ -936,19 +940,19 @@ struct cx23885_subid cx23885_subids[] = {
}, {
.subvendor = 0x0070,
.subdevice = 0xc108,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc138,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc12a,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_STARBURST, /* Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc1f8,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
}, {
.subvendor = 0x1461,
.subdevice = 0xd939,
@@ -1545,8 +1549,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
break;
case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
/* GPIO-8 tda10071 demod reset */
- /* GPIO-9 si2165 demod reset */
+ /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500)*/
/* Put the parts into reset and back */
cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
@@ -1872,6 +1877,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
@@ -1980,6 +1986,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
+ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_T982:
ts1->gen_ctrl_val = 0x5; /* Parallel */
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 1d9d0f86ca8c..1ad49946d7fa 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
cx23885_shutdown(dev);
- pci_disable_device(pci_dev);
-
/* unregister stuff */
free_irq(pci_dev->irq, dev);
+ pci_disable_device(pci_dev);
+
cx23885_dev_unregister(dev);
vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index c47d18270cfc..a9c450d4b54e 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1710,6 +1710,17 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(tda10071_attach,
+ &hauppauge_tda10071_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(a8293_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_a8293_config);
+ }
+ break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_S950:
i2c_bus = &dev->i2c_bus[0];
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index f55cd12da0fd..36f2f96c40e4 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -99,6 +99,7 @@
#define CX23885_BOARD_DVBSKY_S950 49
#define CX23885_BOARD_DVBSKY_S952 50
#define CX23885_BOARD_DVBSKY_T982 51
+#define CX23885_BOARD_HAUPPAUGE_STARBURST 52
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index b463fe172d16..3fe9047ef466 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -602,10 +602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strlcpy(cap->card, video->video.name, sizeof(cap->card));
strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
else
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
return 0;
}
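
This and the following querycap hunks all converge on the same v4l2_capability convention: capabilities describes the whole device and must include V4L2_CAP_DEVICE_CAPS, while device_caps describes only the node being queried. A stubbed model of the split (the flag values below are illustrative, not the real V4L2 constants):

#include <stdint.h>

#define CAP_VIDEO_CAPTURE 0x01u
#define CAP_VIDEO_OUTPUT  0x02u
#define CAP_STREAMING     0x04u
#define CAP_DEVICE_CAPS   0x80u

struct capability_model { uint32_t device_caps, capabilities; };

static void fill_caps(struct capability_model *cap, int is_capture)
{
    /* capabilities of the queried device node only */
    cap->device_caps = (is_capture ? CAP_VIDEO_CAPTURE
                                   : CAP_VIDEO_OUTPUT) | CAP_STREAMING;
    /* union over the whole device, plus the marker flag telling
     * userspace that device_caps is valid */
    cap->capabilities = CAP_VIDEO_CAPTURE | CAP_VIDEO_OUTPUT |
                        CAP_STREAMING | CAP_DEVICE_CAPS;
}

int main(void)
{
    struct capability_model cap;
    fill_caps(&cap, 1);
    return !(cap.capabilities & CAP_DEVICE_CAPS);
}
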
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 8efe40337608..6d885239b16a 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -760,8 +760,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici,
{
strcpy(cap->driver, "atmel-isi");
strcpy(cap->card, "Atmel Image Sensor Interface");
- cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index ce72bd26a6ac..192377f55840 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index a60c3bb0e4cc..0b3299dee05d 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -967,7 +967,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index e6b93281f246..16f65ecb70a3 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 951226af0eba..8d6e343fec0f 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -1576,7 +1576,8 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 0c1f55648106..9f1473c0a0cf 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -1799,7 +1799,9 @@ static int rcar_vin_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 8b27b3eb2b25..71787702d4a2 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1652,7 +1652,9 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 0f345b1f9014..f327c49d7e09 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
{
"Mygica T230 DVB-T/T2/C",
{ NULL },
- { &cxusb_table[22], NULL },
+ { &cxusb_table[20], NULL },
},
}
};
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 1b158f1167ed..536210b39428 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
module_param_array(vbi_nr, int, NULL, 0444);
MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor");
-static struct v4l2_capability pvr_capability ={
- .driver = "pvrusb2",
- .card = "Hauppauge WinTV pvr-usb2",
- .bus_info = "usb",
- .version = LINUX_VERSION_CODE,
- .capabilities = (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
- V4L2_CAP_READWRITE),
-};
-
static struct v4l2_fmtdesc pvr_fmtdesc [] = {
{
.index = 0,
@@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+ strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver));
strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw),
sizeof(cap->bus_info));
strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
+ V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS;
+ switch (fh->pdi->devbase.vfl_type) {
+ case VFL_TYPE_GRABBER:
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
+ break;
+ case VFL_TYPE_RADIO:
+ cap->device_caps = V4L2_CAP_RADIO;
+ break;
+ }
+ cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE;
return 0;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index d09a8916e940..bc08a829bc13 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -3146,27 +3146,26 @@ static int vb2_thread(void *data)
prequeue--;
} else {
call_void_qop(q, wait_finish, q);
- ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+ if (!threadio->stop)
+ ret = vb2_internal_dqbuf(q, &fileio->b, 0);
call_void_qop(q, wait_prepare, q);
dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
}
- if (threadio->stop)
- break;
- if (ret)
+ if (ret || threadio->stop)
break;
try_to_freeze();
vb = q->bufs[fileio->b.index];
if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
- ret = threadio->fnc(vb, threadio->priv);
- if (ret)
- break;
+ if (threadio->fnc(vb, threadio->priv))
+ break;
call_void_qop(q, wait_finish, q);
if (set_timestamp)
v4l2_get_timestamp(&fileio->b.timestamp);
- ret = vb2_internal_qbuf(q, &fileio->b);
+ if (!threadio->stop)
+ ret = vb2_internal_qbuf(q, &fileio->b);
call_void_qop(q, wait_prepare, q);
- if (ret)
+ if (ret || threadio->stop)
break;
}
@@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q)
threadio->stop = true;
vb2_internal_streamoff(q, q->type);
call_void_qop(q, wait_prepare, q);
+ err = kthread_stop(threadio->thread);
q->fileio = NULL;
fileio->req.count = 0;
vb2_reqbufs(q, &fileio->req);
kfree(fileio);
- err = kthread_stop(threadio->thread);
threadio->thread = NULL;
kfree(threadio);
q->fileio = NULL;
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 52a0c2f6264f..ae498b53ee40 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -554,7 +554,8 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id)
return ret;
}
- ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
+ ret = mfd_add_devices(da9052->dev, PLATFORM_DEVID_AUTO,
+ da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
if (ret) {
dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret);
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index dbdd0faeb6ce..210d1f85679e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -681,21 +681,9 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
#ifdef CONFIG_PM
static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
{
- struct rtsx_ucr *ucr =
- (struct rtsx_ucr *)usb_get_intfdata(intf);
-
dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
__func__, message.event);
- /*
- * Call to make sure LED is off during suspend to save more power.
- * It is NOT a permanent state and could be turned on anytime later.
- * Thus no need to call turn_on when resuming.
- */
- mutex_lock(&ucr->dev_mutex);
- rtsx_usb_turn_off_led(ucr);
- mutex_unlock(&ucr->dev_mutex);
-
return 0;
}
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 0d256cb002eb..d6b764349f9d 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -125,10 +125,21 @@ int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg,
}
EXPORT_SYMBOL_GPL(tps65218_clear_bits);
+static const struct regmap_range tps65218_yes_ranges[] = {
+ regmap_reg_range(TPS65218_REG_INT1, TPS65218_REG_INT2),
+ regmap_reg_range(TPS65218_REG_STATUS, TPS65218_REG_STATUS),
+};
+
+static const struct regmap_access_table tps65218_volatile_table = {
+ .yes_ranges = tps65218_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges),
+};
+
static struct regmap_config tps65218_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
+ .volatile_table = &tps65218_volatile_table,
};
static const struct regmap_irq tps65218_irqs[] = {
@@ -193,6 +204,7 @@ static struct regmap_irq_chip tps65218_irq_chip = {
.num_regs = 2,
.mask_base = TPS65218_REG_INT_MASK1,
+ .status_base = TPS65218_REG_INT1,
};
static const struct of_device_id of_tps65218_match_table[] = {
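
Marking the INT and STATUS registers volatile matters because REGCACHE_RBTREE answers reads from the cache: live interrupt and status registers must bypass it, or the IRQ chip (whose status_base is added in the same hunk) would read stale state. A toy cache model of the effect (nothing here is the real regmap API; the volatile threshold is made up):

#include <stdio.h>

struct toy_regmap { int cache[256]; int cached[256]; };

static int reg_is_volatile(int reg) { return reg >= 0xF0; /* "INT/STATUS" */ }

static int toy_read(struct toy_regmap *m, int reg, int hw_value)
{
    if (!reg_is_volatile(reg) && m->cached[reg])
        return m->cache[reg];   /* config regs only change when we
                                 * write them, so the cache is fine */
    m->cache[reg] = hw_value;
    m->cached[reg] = 1;
    return hw_value;            /* volatile: always hit the bus */
}

int main(void)
{
    struct toy_regmap m = { { 0 }, { 0 } };
    toy_read(&m, 0x10, 1);                  /* config reg: cached */
    printf("%d\n", toy_read(&m, 0x10, 2));  /* 1: served from cache */
    toy_read(&m, 0xF0, 1);                  /* status reg */
    printf("%d\n", toy_read(&m, 0xF0, 0));  /* 0: re-read from hw */
    return 0;
}
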
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f94a9fa60488..c672c4dcffac 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
c_can_irq_control(priv, false);
+ /* put ctrl to init on stop to end ongoing transmission */
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
/* deactivate pins */
pinctrl_pm_select_sleep_state(dev->dev.parent);
priv->can.state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index f363972cd77d..e36d10520e24 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -103,27 +103,34 @@ static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable)
mask = 1 << raminit->bits.start | 1 << raminit->bits.done;
regmap_read(raminit->syscon, raminit->reg, &ctrl);
- /* We clear the done and start bit first. The start bit is
+ /* We clear the start bit first. The start bit is
* looking at the 0 -> 1 transition, but is not self clearing;
- * And we clear the init done bit as well.
* NOTE: DONE must be written with 1 to clear it.
+ * We can't clear the DONE bit here using regmap_update_bits()
+ * as it will bypass the write if initial condition is START:0 DONE:1
+ * e.g. on DRA7 which needs START pulse.
*/
- ctrl &= ~(1 << raminit->bits.start);
- ctrl |= 1 << raminit->bits.done;
- regmap_write(raminit->syscon, raminit->reg, ctrl);
+ ctrl &= ~mask; /* START = 0, DONE = 0 */
+ regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
- ctrl &= ~(1 << raminit->bits.done);
- c_can_hw_raminit_wait_syscon(priv, mask, ctrl);
+ /* check if START bit is 0. Ignore DONE bit for now
+ * as it can be either 0 or 1.
+ */
+ c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl);
if (enable) {
- /* Set start bit and wait for the done bit. */
+ /* Clear DONE bit & set START bit. */
ctrl |= 1 << raminit->bits.start;
- regmap_write(raminit->syscon, raminit->reg, ctrl);
-
+ /* DONE must be written with 1 to clear it */
+ ctrl |= 1 << raminit->bits.done;
+ regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
+ /* prevent further clearing of DONE bit */
+ ctrl &= ~(1 << raminit->bits.done);
/* clear START bit if start pulse is needed */
if (raminit->needs_pulse) {
ctrl &= ~(1 << raminit->bits.start);
- regmap_write(raminit->syscon, raminit->reg, ctrl);
+ regmap_update_bits(raminit->syscon, raminit->reg,
+ mask, ctrl);
}
ctrl |= 1 << raminit->bits.done;
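
The reshuffled sequence above works around a genuine hazard with write-one-to-clear (W1C) bits: an update-bits helper that skips the bus write when the value appears unchanged will never emit the 1 that actually clears DONE. A toy model of the pitfall (stub register, not the real regmap):

#include <stdio.h>

#define START (1u << 0)
#define DONE  (1u << 1)     /* status bit, cleared by writing 1 */

static unsigned reg = DONE; /* hardware starts with DONE latched */

static void hw_write(unsigned val)
{
    if (val & DONE)
        reg &= ~DONE;                       /* W1C semantics */
    reg = (reg & DONE) | (val & ~DONE);     /* other bits: plain write */
}

static void update_bits(unsigned mask, unsigned val)
{
    unsigned newv = (reg & ~mask) | (val & mask);
    if (newv != reg)        /* "skip redundant write" optimization */
        hw_write(newv);
}

int main(void)
{
    update_bits(DONE, DONE);                /* looks redundant: skipped */
    printf("DONE=%u\n", !!(reg & DONE));    /* still 1 */

    hw_write(DONE);                         /* unconditional write */
    printf("DONE=%u\n", !!(reg & DONE));    /* now 0 */
    return 0;
}
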
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3ec8f6f25e5f..847c1f813261 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -807,10 +807,14 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- if (cm->flags & ~priv->ctrlmode_supported)
+
+ /* check whether changed bits are allowed to be modified */
+ if (cm->mask & ~priv->ctrlmode_supported)
return -EOPNOTSUPP;
+
+ /* clear bits to be modified and copy the flag values */
priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= cm->flags;
+ priv->ctrlmode |= (cm->flags & cm->mask);
/* CAN_CTRLMODE_FD can only be set when driver supports FD */
if (priv->ctrlmode & CAN_CTRLMODE_FD)
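
The netlink ctrlmode request carries a mask/flags pair: mask selects which bits the request touches, flags carries their new values. The fix validates the mask (not the flags) against what the driver supports, and masks the flags on copy-in so stray flag bits outside the mask are ignored rather than applied or rejected. A standalone model of the corrected semantics:

#include <assert.h>
#include <stdint.h>

static int change_ctrlmode(uint32_t *ctrlmode, uint32_t supported,
                           uint32_t mask, uint32_t flags)
{
    if (mask & ~supported)      /* touching unsupported bits */
        return -1;              /* -EOPNOTSUPP in the driver */
    *ctrlmode &= ~mask;
    *ctrlmode |= flags & mask;  /* ignore stray flag bits */
    return 0;
}

int main(void)
{
    uint32_t mode = 0x1;        /* one mode bit already set */
    assert(change_ctrlmode(&mode, 0x3, 0x2, 0x2) == 0 && mode == 0x3);
    /* the old code checked "flags" against supported, so a request
     * with stray flag bits outside the mask was wrongly rejected: */
    assert(change_ctrlmode(&mode, 0x3, 0x2, 0xff2) == 0 && mode == 0x3);
    assert(change_ctrlmode(&mode, 0x3, 0x4, 0x4) == -1);
    return 0;
}
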
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index d7bc462aafdc..244529881be9 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -955,6 +955,11 @@ static struct net_device *alloc_m_can_dev(void)
priv->can.data_bittiming_const = &m_can_data_bittiming_const;
priv->can.do_set_mode = m_can_set_mode;
priv->can.do_get_berr_counter = m_can_get_berr_counter;
+
+ /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
+ priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+
+ /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 541fb7a05625..7af379ca861b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -520,10 +520,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
buf, msg->len,
- kvaser_usb_simple_msg_callback, priv);
+ kvaser_usb_simple_msg_callback, netdev);
usb_anchor_urb(urb, &priv->tx_submitted);
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
priv = dev->nets[channel];
stats = &priv->netdev->stats;
- if (status & M16C_STATE_BUS_RESET) {
- kvaser_usb_unlink_tx_urbs(priv);
- return;
- }
-
skb = alloc_can_err_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
- if (status & M16C_STATE_BUS_OFF) {
+ if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
cf->can_id |= CAN_ERR_BUSOFF;
priv->can.can_stats.bus_off++;
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
}
new_state = CAN_STATE_ERROR_PASSIVE;
- }
-
- if (status == M16C_STATE_BUS_ERROR) {
+ } else if (status & M16C_STATE_BUS_ERROR) {
if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
((txerr >= 96) || (rxerr >= 96))) {
cf->can_id |= CAN_ERR_CRTL;
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
priv->can.can_stats.error_warning++;
new_state = CAN_STATE_ERROR_WARNING;
- } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+ } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+ ((txerr < 96) && (rxerr < 96))) {
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
@@ -770,10 +764,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
priv->can.state = new_state;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
@@ -805,10 +798,9 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
stats->rx_over_errors++;
stats->rx_errors++;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
}
@@ -887,10 +879,9 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
cf->can_dlc);
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
@@ -1246,6 +1237,9 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+ /* reset tx contexts */
+ kvaser_usb_unlink_tx_urbs(priv);
+
priv->can.state = CAN_STATE_STOPPED;
close_candev(priv->netdev);
@@ -1294,12 +1288,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (!urb) {
netdev_err(netdev, "No memory left for URBs\n");
stats->tx_dropped++;
- goto nourbmem;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
if (!buf) {
stats->tx_dropped++;
+ dev_kfree_skb(skb);
goto nobufmem;
}
@@ -1334,6 +1330,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
}
}
+ /* This should never happen; it implies a flow control bug */
if (!context) {
netdev_warn(netdev, "cannot find free context\n");
ret = NETDEV_TX_BUSY;
@@ -1364,9 +1361,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (unlikely(err)) {
can_free_echo_skb(netdev, context->echo_index);
- skb = NULL; /* set to NULL to avoid double free in
- * dev_kfree_skb(skb) */
-
atomic_dec(&priv->active_tx_urbs);
usb_unanchor_urb(urb);
@@ -1388,8 +1382,6 @@ releasebuf:
kfree(buf);
nobufmem:
usb_free_urb(urb);
-nourbmem:
- dev_kfree_skb(skb);
return ret;
}
@@ -1502,6 +1494,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
struct kvaser_usb_net_priv *priv;
int i, err;
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
+ if (err)
+ return err;
+
netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Cannot alloc candev\n");
@@ -1588,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
{
struct kvaser_usb *dev;
int err = -ENOMEM;
- int i;
+ int i, retry = 3;
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -1606,10 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
- for (i = 0; i < MAX_NET_DEVICES; i++)
- kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+ /* On some x86 laptops, plugging a Kvaser device again after
+ * an unplug makes the firmware always ignore the very first
+ * command. For such a case, provide some room for retries
+ * instead of completely exiting the driver.
+ */
+ do {
+ err = kvaser_usb_get_software_info(dev);
+ } while (--retry && err == -ETIMEDOUT);
- err = kvaser_usb_get_software_info(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software infos, error %d\n", err);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 75b08c63d39f..29a09271b64a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -767,16 +767,17 @@
#define MTL_Q_RQOMR 0x40
#define MTL_Q_RQMPOCR 0x44
#define MTL_Q_RQDR 0x4c
+#define MTL_Q_RQFCR 0x50
#define MTL_Q_IER 0x70
#define MTL_Q_ISR 0x74
/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
#define MTL_Q_RQOMR_EHFC_INDEX 7
#define MTL_Q_RQOMR_EHFC_WIDTH 1
-#define MTL_Q_RQOMR_RFA_INDEX 8
-#define MTL_Q_RQOMR_RFA_WIDTH 3
-#define MTL_Q_RQOMR_RFD_INDEX 13
-#define MTL_Q_RQOMR_RFD_WIDTH 3
#define MTL_Q_RQOMR_RQS_INDEX 16
#define MTL_Q_RQOMR_RQS_WIDTH 9
#define MTL_Q_RQOMR_RSF_INDEX 5
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 53f5f66ec2ee..4c66cd1d1e60 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++) {
/* Activate flow control when less than 4k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
/* De-activate flow control when more than 6k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
}
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 05c6af6c418f..3007d95fbb9f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1167,10 +1167,10 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
bgmac->int_status = 0;
}
- if (handled < weight)
+ if (handled < weight) {
napi_complete(napi);
-
- bgmac_chip_intrs_on(bgmac);
+ bgmac_chip_intrs_on(bgmac);
+ }
return handled;
}
@@ -1515,6 +1515,8 @@ static int bgmac_probe(struct bcma_device *core)
if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+
err = bgmac_mii_register(bgmac);
if (err) {
bgmac_err(bgmac, "Cannot register MDIO\n");
@@ -1529,8 +1531,6 @@ static int bgmac_probe(struct bcma_device *core)
netif_carrier_off(net_dev);
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
-
return 0;
err_mii_unregister:
@@ -1549,9 +1549,9 @@ static void bgmac_remove(struct bcma_device *core)
{
struct bgmac *bgmac = bcma_get_drvdata(core);
- netif_napi_del(&bgmac->napi);
unregister_netdev(bgmac->net_dev);
bgmac_mii_unregister(bgmac);
+ netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1d1147c93d59..e468ed3f210f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
}
#endif
if (!bnx2x_fp_lock_napi(fp))
- return work_done;
+ return budget;
for_each_cos_in_tx_queue(fp, cos)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
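
This bnx2x change, and the identical enic and netxen ones further down, all enforce the same NAPI contract: a poll handler that bails out early without calling napi_complete() must claim the whole budget so the core polls it again; returning less signals "done" and can strand the queue with interrupts still off. A schematic, compilable model (local stubs, not the kernel API):

#include <stdio.h>

struct fake_queue { int locked; int pending; };

static int trylock(struct fake_queue *q) { return !q->locked ? (q->locked = 1) : 0; }
static void unlock(struct fake_queue *q) { q->locked = 0; }

static int example_poll(struct fake_queue *q, int budget)
{
    if (!trylock(q))
        return budget;  /* fix: claim full budget so the core
                         * reschedules us instead of idling */

    int work = q->pending < budget ? q->pending : budget;
    q->pending -= work;
    if (work < budget) {
        /* napi_complete() + IRQ re-enable would go here */
    }
    unlock(q);
    return work;
}

int main(void)
{
    struct fake_queue q = { .locked = 1, .pending = 8 };
    printf("%d\n", example_poll(&q, 64));   /* 64: poll me again */
    q.locked = 0;
    printf("%d\n", example_poll(&q, 64));   /* 8: queue drained */
    return 0;
}
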
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b29e027c476e..e356afa44e7d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
int err;
if (!enic_poll_lock_napi(&enic->rq[rq]))
- return work_done;
+ return budget;
/* Service RQ
*/
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 41a0a5498da7..d48806b5cd88 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4383,8 +4383,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
* distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
* is expected to work across all types of IP tunnels once exported. Skyhawk
* supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
- * offloads in hw_enc_features only when a VxLAN port is added. Note this only
- * ensures that other tunnels work fine while VxLAN offloads are not enabled.
+ * offloads in hw_enc_features only when a VxLAN port is added. If other (non
+ * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
+ * those other tunnels are unexported on the fly through ndo_features_check().
*
* Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
* adds more than one port, disable offloads and don't re-enable them again
@@ -4463,7 +4464,41 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- return vxlan_features_check(skb, features);
+ struct be_adapter *adapter = netdev_priv(dev);
+ u8 l4_hdr = 0;
+
+ /* The code below restricts offload features for some tunneled packets.
+ * Offload features for normal (non tunnel) packets are unchanged.
+ */
+ if (!skb->encapsulation ||
+ !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
+ return features;
+
+ /* It's an encapsulated packet and VxLAN offloads are enabled. We
+ * should disable tunnel offload features if it's not a VxLAN packet,
+ * as tunnel offloads have been enabled only for VxLAN. This is done to
+ * allow other tunneled traffic like GRE to work fine while VxLAN
+ * offloads are configured in Skyhawk-R.
+ */
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if (l4_hdr != IPPROTO_UDP ||
+ skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB) ||
+ skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+ return features;
}
#endif
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a62fc38f045e..1c75829eb166 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_dma) && \
(addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
/*
* RX/TX descriptors.
*/
@@ -362,6 +366,7 @@ struct tx_queue {
dma_addr_t tso_hdrs_dma;
struct tx_desc *tx_desc_area;
+ char *tx_desc_mapping; /* array to track the type of the dma mapping */
dma_addr_t tx_desc_dma;
int tx_desc_area_size;
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
desc->l4i_chk = 0;
desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
skb_frag_t *this_frag;
int tx_index;
struct tx_desc *desc;
- void *addr;
this_frag = &skb_shinfo(skb)->frags[frag];
- addr = page_address(this_frag->page.p) + this_frag->page_offset;
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
/*
* The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0;
desc->byte_cnt = skb_frag_size(this_frag);
- desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
- desc->byte_cnt, DMA_TO_DEVICE);
+ desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+ this_frag, 0, desc->byte_cnt,
+ DMA_TO_DEVICE);
}
}
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
if (nr_frags) {
txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
int tx_index;
struct tx_desc *desc;
u32 cmd_sts;
+ char desc_dma_map;
tx_index = txq->tx_used_desc;
desc = &txq->tx_desc_area[tx_index];
+ desc_dma_map = txq->tx_desc_mapping[tx_index];
+
cmd_sts = desc->cmd_sts;
if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
reclaimed++;
txq->tx_desc_count--;
- if (!IS_TSO_HEADER(txq, desc->buf_ptr))
- dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+ if (desc_dma_map == DESC_DMA_MAP_PAGE)
+ dma_unmap_page(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ }
if (cmd_sts & TX_ENABLE_INTERRUPT) {
struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
struct tx_queue *txq = mp->txq + index;
struct tx_desc *tx_desc;
int size;
+ int ret;
int i;
txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
nexti * sizeof(struct tx_desc);
}
+ txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+ GFP_KERNEL);
+ if (!txq->tx_desc_mapping) {
+ ret = -ENOMEM;
+ goto err_free_desc_area;
+ }
+
/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
&txq->tso_hdrs_dma, GFP_KERNEL);
if (txq->tso_hdrs == NULL) {
- dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
- txq->tx_desc_area, txq->tx_desc_dma);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_desc_mapping;
}
skb_queue_head_init(&txq->tx_skb);
return 0;
+
+err_free_desc_mapping:
+ kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+ if (index == 0 && size <= mp->tx_desc_sram_size)
+ iounmap(txq->tx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+ return ret;
}
static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
else
dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
+ kfree(txq->tx_desc_mapping);
+
if (txq->tso_hdrs)
dma_free_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
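
The new tx_desc_mapping array fixes an asymmetry: fragment buffers are now mapped with skb_frag_dma_map() (a page mapping) while linear data still uses dma_map_single(), and the unmap flavour on reclaim must match the map flavour. A minimal model of the per-descriptor bookkeeping (print stubs instead of the DMA API):

#include <stdio.h>

enum { DESC_DMA_MAP_SINGLE = 0, DESC_DMA_MAP_PAGE = 1 };

static void unmap_single(int i) { printf("dma_unmap_single(desc %d)\n", i); }
static void unmap_page(int i)   { printf("dma_unmap_page(desc %d)\n", i); }

int main(void)
{
    char tx_desc_mapping[4] = {
        DESC_DMA_MAP_SINGLE,    /* skb head */
        DESC_DMA_MAP_PAGE,      /* frag */
        DESC_DMA_MAP_PAGE,      /* frag */
        DESC_DMA_MAP_SINGLE,    /* next skb head */
    };

    for (int i = 0; i < 4; i++) {   /* the reclaim path */
        if (tx_desc_mapping[i] == DESC_DMA_MAP_PAGE)
            unmap_page(i);
        else
            unmap_single(i);
    }
    return 0;
}
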
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d0d6dc1b8e46..ac6a8f1eea6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -475,7 +475,8 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
{
int err;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+ priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 03e9eb0dc761..6e08352ec994 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1744,8 +1744,7 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
- dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS &&
- dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
else
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index f5e4b820128b..db0c7a9aee60 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -6987,7 +6987,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
if (sp->s2io_entries[i].in_use == MSIX_FLG) {
if (sp->s2io_entries[i].type ==
MSIX_RING_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ snprintf(sp->desc[i],
+ sizeof(sp->desc[i]),
+ "%s:MSI-X-%d-RX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_ring_handle,
@@ -6996,7 +6998,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
sp->s2io_entries[i].arg);
} else if (sp->s2io_entries[i].type ==
MSIX_ALARM_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ snprintf(sp->desc[i],
+ sizeof(sp->desc[i]),
+ "%s:MSI-X-%d-TX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_fifo_handle,
@@ -8154,7 +8158,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s: UDP Fragmentation Offload(UFO) enabled\n",
dev->name);
/* Initialize device name */
- sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
+ snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
+ sp->product_name);
if (vlan_tag_strip)
sp->vlan_strip_flag = 1;
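
The sprintf() -> snprintf() conversions above bound every format into its destination array; with MSI-X, the interface name plus a large vector index could otherwise overrun sp->desc[i]. A self-contained illustration of the truncation behaviour:

#include <stdio.h>

int main(void)
{
    char desc[16];
    /* snprintf never writes past sizeof(desc) and always
     * NUL-terminates, even when the expansion is longer */
    int n = snprintf(desc, sizeof(desc), "%s:MSI-X-%d-RX", "eth0", 12);
    printf("%s (wanted %d chars)\n", desc, n);  /* truncated to 15 */
    return 0;
}
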
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 613037584d08..c531c8ae1be4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
work_done = netxen_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 37583a9d8853..04283fe0e6a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRL31] = 0x01fc,
};
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -498,6 +501,8 @@ static struct sh_eth_cpu_data r8a779x_data = {
EESR_ECI,
.fdr_value = 0x00000f0f,
+ .trscer_err_mask = DESC_I_RINT8,
+
.apr = 1,
.mpr = 1,
.tpauser = 1,
@@ -538,8 +543,6 @@ static struct sh_eth_cpu_data sh7724_data = {
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
- .trscer_err_mask = DESC_I_RINT8,
-
.apr = 1,
.mpr = 1,
.tpauser = 1,
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ dma_addr_t dma_addr;
mdp->cur_rx = 0;
mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* skb */
mdp->rx_skbuff[i] = NULL;
skb = netdev_alloc_skb(ndev, skbuff_size);
- mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 16 bytes. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
- dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
- DMA_FROM_DEVICE);
- rxdesc->addr = virt_to_phys(skb->data);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ kfree_skb(skb);
+ break;
+ }
+ mdp->rx_skbuff[i] = skb;
+ rxdesc->addr = dma_addr;
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* Rx descriptor address set */
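
This hunk, and the receive-refill hunk below, adopt the same discipline: treat dma_map_single() like an allocation that can fail, check dma_mapping_error() before publishing anything, and only store the skb pointer once the mapping succeeded (the old code set mdp->rx_skbuff[i] before it could still bail out). A schematic of the pattern with local stubs:

#include <stdio.h>
#include <stdlib.h>

#define DMA_ERROR ((unsigned long)-1)

static unsigned long fake_dma_map(void *buf) { return buf ? (unsigned long)buf : DMA_ERROR; }
static int fake_mapping_error(unsigned long a) { return a == DMA_ERROR; }

struct ring_slot { void *skb; unsigned long addr; };

static int fill_slot(struct ring_slot *slot, void *skb)
{
    unsigned long addr = fake_dma_map(skb);
    if (fake_mapping_error(addr)) {
        free(skb);              /* don't leak, don't publish */
        return -1;
    }
    slot->skb = skb;            /* publish only after success */
    slot->addr = addr;
    return 0;
}

int main(void)
{
    struct ring_slot slot = { 0, 0 };
    printf("%d\n", fill_slot(&slot, malloc(64)));   /* 0 */
    printf("%d\n", fill_slot(&slot, NULL));         /* -1 */
    return 0;
}
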
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
- if (start)
+ if (start) {
+ mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ }
/* PAUSE Prohibition */
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
return ret;
}
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i;
+
+ /* Deactivate all TX descriptors, so DMA should stop at next
+ * packet boundary if it's currently running
+ */
+ for (i = 0; i < mdp->num_tx_ring; i++)
+ mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+ /* Disable TX FIFO egress to MAC */
+ sh_eth_rcv_snd_disable(ndev);
+
+ /* Stop RX DMA at next packet boundary */
+ sh_eth_write(ndev, 0, EDRRR);
+
+ /* Aside from TX DMA, we can't tell when the hardware is
+ * really stopped, so we need to reset to make sure.
+ * Before doing that, wait for long enough to *probably*
+ * finish transmitting the last packet and poll stats.
+ */
+ msleep(2); /* max frame time at 10 Mbps < 1250 us */
+ sh_eth_get_stats(ndev);
+ sh_eth_reset(ndev);
+}
+
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
u16 pkt_len = 0;
u32 desc_status;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ dma_addr_t dma_addr;
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
- ALIGN(mdp->rx_buf_sz, 16),
- DMA_FROM_DEVICE);
+ dma_unmap_single(&ndev->dev, rxdesc->addr,
+ ALIGN(mdp->rx_buf_sz, 16),
+ DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, skbuff_size);
- mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
- dma_map_single(&ndev->dev, skb->data,
- rxdesc->buffer_length, DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ kfree_skb(skb);
+ break;
+ }
+ mdp->rx_skbuff[entry] = skb;
skb_checksum_none_assert(skb);
- rxdesc->addr = virt_to_phys(skb->data);
+ rxdesc->addr = dma_addr;
}
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
@@ -1573,7 +1617,6 @@ ignore_link:
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
ndev->stats.rx_frame_errors++;
- netif_err(mdp, rx_err, ndev, "Receive Abort\n");
}
}
@@ -1592,13 +1635,11 @@ ignore_link:
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
- netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
ndev->stats.rx_fifo_errors++;
- netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
}
if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
ret = IRQ_HANDLED;
else
- goto other_irq;
+ goto out;
+
+ if (unlikely(!mdp->irq_enabled)) {
+ sh_eth_write(ndev, 0, EESIPR);
+ goto out;
+ }
if (intr_status & EESR_RX_CHECK) {
if (napi_schedule_prep(&mdp->napi)) {
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
sh_eth_error(ndev, intr_status);
}
-other_irq:
+out:
spin_unlock(&mdp->lock);
return ret;
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
/* Reenable Rx interrupts */
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ if (mdp->irq_enabled)
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
return budget - quota;
}
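Taken together, the new irq_enabled flag defines a small protocol between the teardown paths and the interrupt/NAPI handlers. Condensed from the hunks above (sh_eth names, locking elided):

/* teardown side (sh_eth_close, sh_eth_set_ringparam) */
mdp->irq_enabled = false;	/* from here on, nobody may re-enable */
synchronize_irq(ndev->irq);	/* wait out a handler already running */
napi_disable(&mdp->napi);	/* napi_synchronize() when only resizing */
sh_eth_write(ndev, 0x0000, EESIPR);

/* interrupt handler */
if (unlikely(!mdp->irq_enabled)) {
	sh_eth_write(ndev, 0, EESIPR);	/* re-mask, never unmask */
	goto out;
}

/* NAPI poll completion */
if (mdp->irq_enabled)
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);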
@@ -1827,6 +1874,9 @@ static int sh_eth_get_settings(struct net_device *ndev,
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_ethtool_gset(mdp->phydev, ecmd);
spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1841,6 +1891,9 @@ static int sh_eth_set_settings(struct net_device *ndev,
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
/* disable tx and rx */
@@ -1875,6 +1928,9 @@ static int sh_eth_nway_reset(struct net_device *ndev)
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_start_aneg(mdp->phydev);
spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1959,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
return -EINVAL;
if (netif_running(ndev)) {
+ netif_device_detach(ndev);
netif_tx_disable(ndev);
- /* Disable interrupts by clearing the interrupt mask. */
- sh_eth_write(ndev, 0x0000, EESIPR);
- /* Stop the chip's Tx and Rx processes. */
- sh_eth_write(ndev, 0, EDTRR);
- sh_eth_write(ndev, 0, EDRRR);
+
+ /* Serialise with the interrupt handler and NAPI, then
+ * disable interrupts. We have to clear the
+ * irq_enabled flag first to ensure that interrupts
+ * won't be re-enabled.
+ */
+ mdp->irq_enabled = false;
synchronize_irq(ndev->irq);
- }
+ napi_synchronize(&mdp->napi);
+ sh_eth_write(ndev, 0x0000, EESIPR);
- /* Free all the skbuffs in the Rx queue. */
- sh_eth_ring_free(ndev);
- /* Free DMA buffer */
- sh_eth_free_dma_buffer(mdp);
+ sh_eth_dev_exit(ndev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+ /* Free DMA buffer */
+ sh_eth_free_dma_buffer(mdp);
+ }
/* Set new parameters */
mdp->num_rx_ring = ring->rx_pending;
mdp->num_tx_ring = ring->tx_pending;
- ret = sh_eth_ring_init(ndev);
- if (ret < 0) {
- netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
- return ret;
- }
- ret = sh_eth_dev_init(ndev, false);
- if (ret < 0) {
- netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
- return ret;
- }
-
if (netif_running(ndev)) {
+ ret = sh_eth_ring_init(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+ __func__);
+ return ret;
+ }
+ ret = sh_eth_dev_init(ndev, false);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+ __func__);
+ return ret;
+ }
+
+ mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* Setting the Rx mode will start the Rx process. */
sh_eth_write(ndev, EDRRR_R, EDRRR);
- netif_wake_queue(ndev);
+ netif_device_attach(ndev);
}
return 0;
@@ -2108,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
entry = mdp->cur_tx % mdp->num_tx_ring;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
@@ -2117,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb->len + 2);
txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (skb->len < ETH_ZLEN)
- txdesc->buffer_length = ETH_ZLEN;
- else
- txdesc->buffer_length = skb->len;
+ if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ txdesc->buffer_length = skb->len;
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
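skb_padto() frees the skb itself on failure, so the caller must return NETDEV_TX_OK without touching the buffer again; after a successful pad the driver owns the skb and must free it on its own error paths. A minimal start_xmit sketch of those ownership rules (descriptor setup elided):

#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>

static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *ndev)
{
	dma_addr_t addr;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;	/* skb already freed by skb_padto() */

	addr = dma_map_single(&ndev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, addr)) {
		kfree_skb(skb);		/* we still own the skb here */
		return NETDEV_TX_OK;	/* drop rather than requeue */
	}
	/* ... fill the TX descriptor with addr and skb->len ... */
	return NETDEV_TX_OK;
}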
@@ -2172,24 +2242,26 @@ static int sh_eth_close(struct net_device *ndev)
netif_stop_queue(ndev);
- /* Disable interrupts by clearing the interrupt mask. */
+ /* Serialise with the interrupt handler and NAPI, then disable
+ * interrupts. We have to clear the irq_enabled flag first to
+ * ensure that interrupts won't be re-enabled.
+ */
+ mdp->irq_enabled = false;
+ synchronize_irq(ndev->irq);
+ napi_disable(&mdp->napi);
sh_eth_write(ndev, 0x0000, EESIPR);
- /* Stop the chip's Tx and Rx processes. */
- sh_eth_write(ndev, 0, EDTRR);
- sh_eth_write(ndev, 0, EDRRR);
+ sh_eth_dev_exit(ndev);
- sh_eth_get_stats(ndev);
/* PHY Disconnect */
if (mdp->phydev) {
phy_stop(mdp->phydev);
phy_disconnect(mdp->phydev);
+ mdp->phydev = NULL;
}
free_irq(ndev->irq, ndev);
- napi_disable(&mdp->napi);
-
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
@@ -2417,7 +2489,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int i, ret;
- if (unlikely(!mdp->cd->tsu))
+ if (!mdp->cd->tsu)
return 0;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
@@ -2440,7 +2512,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
- if (unlikely(!mdp->cd->tsu))
+ if (!mdp->cd->tsu)
return;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
@@ -2450,8 +2522,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
}
}
-/* Multicast reception directions set */
-static void sh_eth_set_multicast_list(struct net_device *ndev)
+/* Update promiscuous flag and multicast filter */
+static void sh_eth_set_rx_mode(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 ecmr_bits;
@@ -2462,7 +2534,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
/* Initial condition is MCT = 1, PRM = 0.
* Depending on ndev->flags, set PRM or clear MCT
*/
- ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
+ ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
+ if (mdp->cd->tsu)
+ ecmr_bits |= ECMR_MCT;
if (!(ndev->flags & IFF_MULTICAST)) {
sh_eth_tsu_purge_mcast(ndev);
@@ -2491,9 +2565,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
}
}
}
- } else {
- /* Normal, unicast/broadcast-only mode. */
- ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
}
/* update the ethernet mode */
@@ -2701,6 +2772,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
+ .ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -2713,7 +2785,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
- .ndo_set_rx_mode = sh_eth_set_multicast_list,
+ .ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 71f5de1171bd..332d3c16d483 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -513,6 +513,7 @@ struct sh_eth_private {
u32 rx_buf_sz; /* Based on MTU+slack. */
int edmac_endian;
struct napi_struct napi;
+ bool irq_enabled;
/* MII transceiver section. */
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 698494481d18..b1a271853d85 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -474,13 +474,19 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
/* allocate memory for RX skbuff array */
rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
sizeof(dma_addr_t), GFP_KERNEL);
- if (rx_ring->rx_skbuff_dma == NULL)
- goto dmamem_err;
+ if (!rx_ring->rx_skbuff_dma) {
+ dma_free_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ rx_ring->dma_rx, rx_ring->dma_rx_phy);
+ goto error;
+ }
rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
sizeof(struct sk_buff *), GFP_KERNEL);
- if (rx_ring->rx_skbuff == NULL)
- goto rxbuff_err;
+ if (!rx_ring->rx_skbuff) {
+ kfree(rx_ring->rx_skbuff_dma);
+ goto error;
+ }
/* initialise the buffers */
for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
@@ -502,13 +508,6 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
err_init_rx_buffers:
while (--desc_index >= 0)
free_rx_ring(priv->device, rx_ring, desc_index);
- kfree(rx_ring->rx_skbuff);
-rxbuff_err:
- kfree(rx_ring->rx_skbuff_dma);
-dmamem_err:
- dma_free_coherent(priv->device,
- rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
- rx_ring->dma_rx, rx_ring->dma_rx_phy);
error:
return -ENOMEM;
}
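The rework keeps the error handling to the allocate-forward, free-backward rule: each failure path releases exactly what was allocated before it, no more. The canonical cascading-label form, with hypothetical names:

struct ctx { void *a, *b, *c; };	/* hypothetical resources */

int setup(struct ctx *c)
{
	c->a = alloc_a();
	if (!c->a)
		return -ENOMEM;
	c->b = alloc_b();
	if (!c->b)
		goto err_free_a;
	c->c = alloc_c();
	if (!c->c)
		goto err_free_b;
	return 0;

err_free_b:			/* unwind in reverse allocation order */
	free_b(c->b);
err_free_a:
	free_a(c->a);
	return -ENOMEM;
}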
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 866560ea9e18..b02eed12bfc5 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -108,10 +108,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
}
- /* Get MAC address if available (DT) */
- if (mac)
- ether_addr_copy(priv->dev->dev_addr, mac);
-
priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
if (!priv) {
pr_err("%s: main driver probe failed\n", __func__);
@@ -125,6 +121,10 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
goto err_drv_remove;
}
+ /* Get MAC address if available (DT) */
+ if (mac)
+ ether_addr_copy(priv->dev->dev_addr, mac);
+
/* Get the TX/RX IRQ numbers */
for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8c6b7c1651e5..cf62ff4c8c56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* @addr: iobase memory address
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
+ * Return: the new private structure on success, otherwise an error
+ * pointer.
*/
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
@@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
SET_NETDEV_DEV(ndev, device);
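With stmmac_dvr_probe() returning ERR_PTR(-ENOMEM) instead of NULL, callers must test with IS_ERR() and propagate PTR_ERR(); a NULL check would now silently pass on failure. The expected call-site pattern:

priv = stmmac_dvr_probe(&pdev->dev, plat_dat, addr);
if (IS_ERR(priv)) {
	pr_err("%s: main driver probe failed\n", __func__);
	return PTR_ERR(priv);	/* propagate the encoded errno */
}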
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 64d1cef4cda1..a39131f494ec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1634,16 +1634,24 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
unsigned short vid)
{
int ret;
- int unreg_mcast_mask;
+ int unreg_mcast_mask = 0;
+ u32 port_mask;
- if (priv->ndev->flags & IFF_ALLMULTI)
- unreg_mcast_mask = ALE_ALL_PORTS;
- else
- unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+ if (priv->data.dual_emac) {
+ port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
- ret = cpsw_ale_add_vlan(priv->ale, vid,
- ALE_ALL_PORTS << priv->host_port,
- 0, ALE_ALL_PORTS << priv->host_port,
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = ALE_ALL_PORTS;
+
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = ALE_ALL_PORTS;
+ else
+ unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+ }
+
+ ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
unreg_mcast_mask << priv->host_port);
if (ret != 0)
return ret;
@@ -1654,8 +1662,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
goto clean_vid;
ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- ALE_ALL_PORTS << priv->host_port,
- ALE_VLAN, vid, 0);
+ port_mask, ALE_VLAN, vid, 0);
if (ret != 0)
goto clean_vlan_ucast;
return 0;
@@ -1676,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ if (priv->data.dual_emac) {
+ /* In dual EMAC, reserved VLAN id should not be used for
+ * creating VLAN interfaces as this can break the dual
+ * EMAC port separation
+ */
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (vid == priv->slaves[i].port_vlan)
+ return -EINVAL;
+ }
+ }
+
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
return cpsw_add_vlan_ale_entry(priv, vid);
}
@@ -1689,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ if (priv->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (vid == priv->slaves[i].port_vlan)
+ return -EINVAL;
+ }
+ }
+
dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
if (ret != 0)
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ea712512c7d1..5fae4354722c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -62,6 +62,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
@@ -343,9 +344,7 @@ struct emac_priv {
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
const char *phy_id;
-#ifdef CONFIG_OF
struct device_node *phy_node;
-#endif
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -922,6 +921,16 @@ static void emac_int_disable(struct emac_priv *priv)
if (priv->int_disable)
priv->int_disable();
+ /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+ /* ack rxen; only then will a new pulse be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+ /* ack txen; only then will a new pulse be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_TXEN);
+
local_irq_restore(flags);
} else {
@@ -951,15 +960,6 @@ static void emac_int_enable(struct emac_priv *priv)
* register */
/* NOTE: Rx Threshold and Misc interrupts are not enabled */
-
- /* ack rxen only then a new pulse will be generated */
- emac_write(EMAC_DM646X_MACEOIVECTOR,
- EMAC_DM646X_MAC_EOI_C0_RXEN);
-
- /* ack txen- only then a new pulse will be generated */
- emac_write(EMAC_DM646X_MACEOIVECTOR,
- EMAC_DM646X_MAC_EOI_C0_TXEN);
-
} else {
/* Set DM644x control registers for interrupt control */
emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
@@ -1537,7 +1537,13 @@ static int emac_dev_open(struct net_device *ndev)
int i = 0;
struct emac_priv *priv = netdev_priv(ndev);
- pm_runtime_get(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, ret);
+ return ret;
+ }
netif_carrier_off(ndev);
for (cnt = 0; cnt < ETH_ALEN; cnt++)
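pm_runtime_get_sync() bumps the usage count even when it fails, which is why the error path pairs it with pm_runtime_put_noidle() rather than returning directly. The pattern in isolation:

ret = pm_runtime_get_sync(dev);
if (ret < 0) {
	pm_runtime_put_noidle(dev);	/* rebalance the usage count */
	return ret;
}
/* ... device is powered, do the work ... */
pm_runtime_put(dev);			/* drop the reference when done */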
@@ -1596,8 +1602,20 @@ static int emac_dev_open(struct net_device *ndev)
cpdma_ctlr_start(priv->dma);
priv->phydev = NULL;
+
+ if (priv->phy_node) {
+ priv->phydev = of_phy_connect(ndev, priv->phy_node,
+ &emac_adjust_link, 0, 0);
+ if (!priv->phydev) {
+ dev_err(emac_dev, "could not connect to phy %s\n",
+ priv->phy_node->full_name);
+ ret = -ENODEV;
+ goto err;
+ }
+ }
+
/* use the first phy on the bus if pdata did not give us a phy id */
- if (!priv->phy_id) {
+ if (!priv->phydev && !priv->phy_id) {
struct device *phy;
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1606,7 +1624,7 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id = dev_name(phy);
}
- if (priv->phy_id && *priv->phy_id) {
+ if (!priv->phydev && priv->phy_id && *priv->phy_id) {
priv->phydev = phy_connect(ndev, priv->phy_id,
&emac_adjust_link,
PHY_INTERFACE_MODE_MII);
@@ -1627,7 +1645,9 @@ static int emac_dev_open(struct net_device *ndev)
"(mii_bus:phy_addr=%s, id=%x)\n",
priv->phydev->drv->name, dev_name(&priv->phydev->dev),
priv->phydev->phy_id);
- } else {
+ }
+
+ if (!priv->phydev) {
/* No PHY , fix the link, speed and duplex settings */
dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
@@ -1724,6 +1744,15 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
struct emac_priv *priv = netdev_priv(ndev);
u32 mac_control;
u32 stats_clear_mask;
+ int err;
+
+ err = pm_runtime_get_sync(&priv->pdev->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ return &ndev->stats;
+ }
/* update emac hardware stats and reset the registers*/
@@ -1766,6 +1795,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
+ pm_runtime_put(&priv->pdev->dev);
+
return &ndev->stats;
}
@@ -1859,7 +1890,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
static int davinci_emac_probe(struct platform_device *pdev)
{
int rc = 0;
- struct resource *res;
+ struct resource *res, *res_ctrl;
struct net_device *ndev;
struct emac_priv *priv;
unsigned long hw_ram_addr;
@@ -1876,6 +1907,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
+ devm_clk_put(&pdev->dev, emac_clk);
/* TODO: Probe PHY here if possible */
@@ -1917,11 +1949,20 @@ static int davinci_emac_probe(struct platform_device *pdev)
rc = PTR_ERR(priv->remap_addr);
goto no_pdata;
}
+
+ res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_ctrl) {
+ priv->ctrl_base =
+ devm_ioremap_resource(&pdev->dev, res_ctrl);
+ if (IS_ERR(priv->ctrl_base))
+ goto no_pdata;
+ } else {
+ priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+ }
+
priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
ndev->base_addr = (unsigned long)priv->remap_addr;
- priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
-
hw_ram_addr = pdata->hw_ram_addr;
if (!hw_ram_addr)
hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
@@ -1980,12 +2021,22 @@ static int davinci_emac_probe(struct platform_device *pdev)
ndev->ethtool_ops = &ethtool_ops;
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ pm_runtime_enable(&pdev->dev);
+ rc = pm_runtime_get_sync(&pdev->dev);
+ if (rc < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, rc);
+ goto no_cpdma_chan;
+ }
+
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
if (rc) {
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
+ pm_runtime_put(&pdev->dev);
goto no_cpdma_chan;
}
@@ -1995,9 +2046,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
-
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
return 0;
@@ -2071,9 +2120,14 @@ static const struct emac_platform_data am3517_emac_data = {
.hw_ram_addr = 0x01e20000,
};
+static const struct emac_platform_data dm816_emac_data = {
+ .version = EMAC_VERSION_2,
+};
+
static const struct of_device_id davinci_emac_of_match[] = {
{.compatible = "ti,davinci-dm6467-emac", },
{.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
+ {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, },
{},
};
MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index a14d87783245..2e195289ddf4 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
};
dst = ip6_route_output(dev_net(dev), NULL, &fl6);
- if (IS_ERR(dst))
+ if (dst->error) {
+ ret = dst->error;
+ dst_release(dst);
goto err;
-
+ }
skb_dst_drop(skb);
skb_dst_set(skb, dst);
err = ip6_local_out(skb);
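Unlike allocation-style APIs, ip6_route_output() never returns an ERR_PTR(); it always hands back a dst entry and reports failure through dst->error, so the entry must be released even on the error path -- hence the fix above. The corrected shape:

dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
	ret = dst->error;
	dst_release(dst);	/* dst is a real object even on error */
	goto err;
}
skb_dst_set(skb, dst);		/* consumes the dst reference */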
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 57ec23e8ccfa..bf405f134d3a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -833,9 +833,6 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
- data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -874,9 +871,6 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
- data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -926,12 +920,6 @@ static void sram_write(struct r8152 *tp, u16 addr, u16 data)
ocp_reg_write(tp, OCP_SRAM_DATA, data);
}
-static u16 sram_read(struct r8152 *tp, u16 addr)
-{
- ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
- return ocp_reg_read(tp, OCP_SRAM_DATA);
-}
-
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -2518,24 +2506,18 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
data = ocp_reg_read(tp, OCP_POWER_CFG);
data |= EN_10M_PLLOFF;
ocp_reg_write(tp, OCP_POWER_CFG, data);
- data = sram_read(tp, SRAM_IMPEDANCE);
- data &= ~RX_DRIVING_MASK;
- sram_write(tp, SRAM_IMPEDANCE, data);
+ sram_write(tp, SRAM_IMPEDANCE, 0x0b13);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= PFM_PWM_SWITCH;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
- data = sram_read(tp, SRAM_LPF_CFG);
- data |= LPF_AUTO_TUNE;
- sram_write(tp, SRAM_LPF_CFG, data);
+ /* Enable LPF corner auto tune */
+ sram_write(tp, SRAM_LPF_CFG, 0xf70f);
- data = sram_read(tp, SRAM_10M_AMP1);
- data |= GDAC_IB_UPALL;
- sram_write(tp, SRAM_10M_AMP1, data);
- data = sram_read(tp, SRAM_10M_AMP2);
- data |= AMP_DN;
- sram_write(tp, SRAM_10M_AMP2, data);
+ /* Adjust 10M Amplitude */
+ sram_write(tp, SRAM_10M_AMP1, 0x00af);
+ sram_write(tp, SRAM_10M_AMP2, 0x0208);
set_bit(PHY_RESET, &tp->flags);
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9a72640237cb..62b0bf4fdf6b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
__ath_cancel_work(sc);
+ disable_irq(sc->irq);
tasklet_disable(&sc->intr_tq);
tasklet_disable(&sc->bcon_tasklet);
spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
r = -EIO;
out:
+ enable_irq(sc->irq);
spin_unlock_bh(&sc->sc_pcu_lock);
tasklet_enable(&sc->bcon_tasklet);
tasklet_enable(&sc->intr_tq);
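With the ATH_OP_HW_RESET early returns removed from the ISR below, the reset path itself must keep the IRQ quiet; disable_irq() waits for a running handler to finish, so the ordering is what provides the exclusion. Reduced to the essentials (locking elided):

disable_irq(sc->irq);		/* no new hard IRQs; waits for a running one */
tasklet_disable(&sc->intr_tq);	/* and no deferred handling either */
/* ... perform the hardware reset ... */
enable_irq(sc->irq);
tasklet_enable(&sc->intr_tq);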
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
- if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
- return IRQ_NONE;
-
/* shared irq, not for us */
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
- if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
return IRQ_HANDLED;
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 1bbe4fc47b97..660ddb1b7d8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
* regardless of the band or the number of the probes. FW will calculate
* the actual dwell time.
+ * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
@@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
+ IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
};
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 201846de94e7..cfc0e65b34a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
};
/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
- * @flags: enum iwl_scan_channel_flgs
- * @non_ebs_ratio: how many regular scan iteration before EBS
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines how often a full (non-EBS) scan iteration is
+ * performed.
+ * 1 - EBS is disabled.
+ * 2 - every second scan will be a full scan (and so on).
*/
struct iwl_scan_channel_opt {
__le16 flags;
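Reading the documented semantics the other way around: with 1-based iteration numbering, iteration N is a full (non-EBS) scan exactly when N is a multiple of non_ebs_ratio. A hypothetical helper, for illustration only:

/* Illustration only, not driver code; assumes non_ebs_ratio >= 1. */
static bool iteration_is_full_scan(unsigned int iter, u16 non_ebs_ratio)
{
	/* ratio 1: every scan is full (EBS disabled);
	 * ratio 2: every second scan is full; and so on.
	 */
	return (iter % non_ebs_ratio) == 0;
}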
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e880f9d4717b..20915587c820 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
msk |= mvmsta->tfd_queue_msk;
}
- if (drop) {
- if (iwl_mvm_flush_tx_path(mvm, msk, true))
- IWL_ERR(mvm, "flush request fail\n");
- mutex_unlock(&mvm->mutex);
- } else {
- mutex_unlock(&mvm->mutex);
+ msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
- /* this can take a while, and we may need/want other operations
- * to succeed while doing this, so do it without the mutex held
- */
- iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
- }
+ if (iwl_mvm_flush_tx_path(mvm, msk, true))
+ IWL_ERR(mvm, "flush request fail\n");
+ mutex_unlock(&mvm->mutex);
+
+ /* this can take a while, and we may need/want other operations
+ * to succeed while doing this, so do it without the mutex held
+ */
+ iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index ec9a8e7bae1d..844bf7c4c8de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -72,6 +72,8 @@
#define IWL_PLCP_QUIET_THRESH 1
#define IWL_ACTIVE_QUIET_TIME 10
+#define IWL_DENSE_EBS_SCAN_RATIO 5
+#define IWL_SPARSE_EBS_SCAN_RATIO 1
struct iwl_mvm_scan_params {
u32 max_out_time;
@@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
notify);
+ if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+ return 0;
+
+ if (iwl_mvm_is_radio_killed(mvm))
+ goto out;
+
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
if (mvm->scan_status == IWL_MVM_SCAN_OS)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+out:
mvm->scan_status = IWL_MVM_SCAN_NONE;
if (notify) {
@@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
cmd->iter_num = cpu_to_le32(1);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
- mvm->last_ebs_successful) {
- cmd->channel_opt[0].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- cmd->channel_opt[1].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- }
-
if (iwl_mvm_rrm_scan_needed(mvm))
cmd->scan_flags |=
cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0;
cmd->schedule[1].full_scan_mul = 0;
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+ mvm->last_ebs_successful) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
for (i = 1; i <= req->req.n_ssids; i++)
ssid_bitmap |= BIT(i);
@@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0xff;
cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+ mvm->last_ebs_successful) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4333306ccdee..c59d07567d90 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
if (ieee80211_is_probe_resp(fc))
tx_flags |= TX_CMD_FLG_TSF;
- else if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ } else if (ieee80211_is_back_req(fc)) {
+ struct ieee80211_bar *bar = (void *)skb->data;
+ u16 control = le16_to_cpu(bar->control);
+
+ tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+ tx_cmd->tid_tspec = (control &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+ WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
} else {
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index ea63fbd228ed..352b4f28f82c 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
ret = of_overlay_apply_one(ov, tchild, child);
if (ret)
return ret;
-
- /* The properties are already copied, now do the child nodes */
- for_each_child_of_node(child, grandchild) {
- ret = of_overlay_apply_single_device_node(ov, tchild, grandchild);
- if (ret) {
- pr_err("%s: Failed to apply single node @%s/%s\n",
- __func__, tchild->full_name,
- grandchild->name);
- return ret;
- }
- }
}
return ret;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 5b33c6a21807..b0d50d70a8a1 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev)
size = dev->coherent_dma_mask;
} else {
offset = PFN_DOWN(paddr - dma_addr);
- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
+ dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
dev->dma_pfn_offset = offset;
@@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb,
if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
return NOTIFY_OK; /* not for us */
+ /* already populated? (driver using of_populate manually) */
+ if (of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(rd->dn->parent);
pdev = of_platform_device_create(rd->dn, NULL,
@@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb,
break;
case OF_RECONFIG_CHANGE_REMOVE:
+
+ /* already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* find our device by node */
pdev = of_find_device_by_node(rd->dn);
if (pdev == NULL)
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 75976da22b2e..a2b687d5f324 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -176,5 +176,60 @@
};
};
+ overlay10 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus";
+ __overlay__ {
+
+ /* suppress DTC warning */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest10 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <10>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest101 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <1>;
+ };
+
+ };
+ };
+ };
+ };
+
+ overlay11 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus";
+ __overlay__ {
+
+ /* suppress DTC warning */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest11 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <11>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest111 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <1>;
+ };
+
+ };
+ };
+ };
+ };
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 844838e11ef1..41a4a138f53b 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -978,6 +978,9 @@ static int selftest_probe(struct platform_device *pdev)
}
dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
return 0;
}
@@ -1385,6 +1388,39 @@ static void of_selftest_overlay_8(void)
selftest(1, "overlay test %d passed\n", 8);
}
+/* test insertion of a bus with parent devices */
+static void of_selftest_overlay_10(void)
+{
+ int ret;
+ char *child_path;
+
+ /* device should enable */
+ ret = of_selftest_apply_overlay_check(10, 10, 0, 1);
+ if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 10))
+ return;
+
+ child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101",
+ selftest_path(10));
+ if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10))
+ return;
+
+ ret = of_path_platform_device_exists(child_path);
+ kfree(child_path);
+ if (selftest(ret, "overlay test %d failed; no child device\n", 10))
+ return;
+}
+
+/* test insertion of a bus with parent devices (and revert) */
+static void of_selftest_overlay_11(void)
+{
+ int ret;
+
+ /* device should enable */
+ ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1);
+ if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 11))
+ return;
+}
+
static void __init of_selftest_overlay(void)
{
struct device_node *bus_np = NULL;
@@ -1433,6 +1469,9 @@ static void __init of_selftest_overlay(void)
of_selftest_overlay_6();
of_selftest_overlay_8();
+ of_selftest_overlay_10();
+ of_selftest_overlay_11();
+
out:
of_node_put(bus_np);
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 37e71ff6408d..dceb9ddfd99a 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -694,9 +694,8 @@ lba_fixup_bus(struct pci_bus *bus)
int i;
/* PCI-PCI Bridge */
pci_read_bridge_bases(bus);
- for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
- pci_claim_resource(bus->self, i);
- }
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
+ pci_claim_bridge_resource(bus->self, i);
} else {
/* Host-PCI Bridge */
int err;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 73aef51a28f0..8fb16188cd82 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
+/*
+ * The @idx resource of @dev should be a PCI-PCI bridge window. If this
+ * resource fits inside a window of an upstream bridge, do nothing. If it
+ * overlaps an upstream window but extends outside it, clip the resource so
+ * it fits completely inside.
+ */
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
+{
+ struct pci_bus *bus = dev->bus;
+ struct resource *res = &dev->resource[idx];
+ struct resource orig_res = *res;
+ struct resource *r;
+ int i;
+
+ pci_bus_for_each_resource(bus, r, i) {
+ resource_size_t start, end;
+
+ if (!r)
+ continue;
+
+ if (resource_type(res) != resource_type(r))
+ continue;
+
+ start = max(r->start, res->start);
+ end = min(r->end, res->end);
+
+ if (start > end)
+ continue; /* no overlap */
+
+ if (res->start == start && res->end == end)
+ return false; /* no change */
+
+ res->start = start;
+ res->end = end;
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+ &orig_res, res);
+
+ return true;
+ }
+
+ return false;
+}
+
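Worked example: with an upstream window of 0x1000-0x1fff and a bridge window of 0x1800-0x2fff, start = max() gives 0x1800 and end = min() gives 0x1fff, so the window is clipped to 0x1800-0x1fff and the function returns true; a window already contained in the upstream one (or not overlapping it at all) leaves the resource untouched and returns false.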
void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
/**
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cab05f31223f..e9d4fd861ba1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3271,7 +3271,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
struct pci_dev *pdev;
- if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
+ if (pci_is_root_bus(dev->bus) || dev->subordinate ||
+ !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3305,7 +3306,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
{
struct pci_dev *pdev;
- if (dev->subordinate || !dev->slot)
+ if (dev->subordinate || !dev->slot ||
+ dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3557,6 +3559,20 @@ int pci_try_reset_function(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_try_reset_function);
+/* Do any devices on or below this bus prevent a bus reset? */
+static bool pci_bus_resetable(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
/* Lock devices from the top of the tree down */
static void pci_bus_lock(struct pci_bus *bus)
{
@@ -3607,6 +3623,22 @@ unlock:
return 0;
}
+/* Do any devices on or below this slot prevent a bus reset? */
+static bool pci_slot_resetable(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
{
@@ -3728,7 +3760,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
{
int rc;
- if (!slot)
+ if (!slot || !pci_slot_resetable(slot))
return -ENOTTY;
if (!probe)
@@ -3820,7 +3852,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
static int pci_bus_reset(struct pci_bus *bus, int probe)
{
- if (!bus->self)
+ if (!bus->self || !pci_bus_resetable(bus))
return -ENOTTY;
if (probe)
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 8aff29a804ff..d54632a1db43 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus,
void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head);
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
/**
* pci_ari_enabled - query ARI forwarding status
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ed6f89b6efe5..e52356aa09b8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3028,6 +3028,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
quirk_broken_intx_masking);
+static void quirk_no_bus_reset(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
+}
+
+/*
+ * Atheros AR93xx chips do not behave correctly after a bus reset. The
+ * device throws a Link Down error on AER-capable systems and, regardless
+ * of AER, its config space is never accessible again; attempting an
+ * access typically hangs or resets the system.
+ * http://www.spinics.net/lists/linux-pci/msg34797.html
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+
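Extending the quirk to another device that cannot survive a bus reset is a one-line fixup; the device ID below is purely hypothetical:

/* Hypothetical example only; 0x1234 is a placeholder device ID. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x1234, quirk_no_bus_reset);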
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 0482235eee92..e3e17f3c0f0f 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus);
config space writes, so it's quite possible that an I/O window of
the bridge will have some undesirable address (e.g. 0) after the
first write. Ditto 64-bit prefetchable MMIO. */
-static void pci_setup_bridge_io(struct pci_bus *bus)
+static void pci_setup_bridge_io(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
unsigned long io_mask;
@@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
io_mask = PCI_IO_1K_RANGE_MASK;
/* Set up the top and bottom of the PCI I/O segment for this bus. */
- res = bus->resource[0];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
-static void pci_setup_bridge_mmio(struct pci_bus *bus)
+static void pci_setup_bridge_mmio(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l;
/* Set up the top and bottom of the PCI Memory segment for this bus. */
- res = bus->resource[1];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
@@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
-static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
+static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l, bu, lu;
@@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
/* Set up PREF base/limit. */
bu = lu = 0;
- res = bus->resource[2];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
@@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
&bus->busn_res);
if (type & IORESOURCE_IO)
- pci_setup_bridge_io(bus);
+ pci_setup_bridge_io(bridge);
if (type & IORESOURCE_MEM)
- pci_setup_bridge_mmio(bus);
+ pci_setup_bridge_mmio(bridge);
if (type & IORESOURCE_PREFETCH)
- pci_setup_bridge_mmio_pref(bus);
+ pci_setup_bridge_mmio_pref(bridge);
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus)
__pci_setup_bridge(bus, type);
}
+
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
+{
+ if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
+ return 0;
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed the window */
+
+ if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ return 0;
+
+ if (!pci_bus_clip_resource(bridge, i))
+ return -EINVAL; /* clipping didn't change anything */
+
+ switch (i - PCI_BRIDGE_RESOURCES) {
+ case 0:
+ pci_setup_bridge_io(bridge);
+ break;
+ case 1:
+ pci_setup_bridge_mmio(bridge);
+ break;
+ case 2:
+ pci_setup_bridge_mmio_pref(bridge);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed a smaller window */
+
+ return -EINVAL;
+}
+
/* Check whether the bridge supports optional I/O and
prefetchable memory ranges. If not, the respective
base/limit registers must be read-only and read as 0. */
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e4f65510c87e..89dca77ca038 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
if (pctldev == NULL)
return;
- mutex_lock(&pinctrldev_list_mutex);
mutex_lock(&pctldev->mutex);
-
pinctrl_remove_device_debugfs(pctldev);
+ mutex_unlock(&pctldev->mutex);
if (!IS_ERR(pctldev->p))
pinctrl_put(pctldev->p);
+ mutex_lock(&pinctrldev_list_mutex);
+ mutex_lock(&pctldev->mutex);
/* TODO: check that no pinmuxes are still active? */
list_del(&pctldev->node);
/* Destroy descriptor tree */
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index dfd021e8268f..f4cd0b9b2438 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -177,7 +177,7 @@ struct at91_pinctrl {
struct device *dev;
struct pinctrl_dev *pctl;
- int nbanks;
+ int nactive_banks;
uint32_t *mux_mask;
int nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
int mux;
/* check if it's a valid config */
- if (pin->bank >= info->nbanks) {
+ if (pin->bank >= gpio_banks) {
dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
- name, index, pin->bank, info->nbanks);
+ name, index, pin->bank, gpio_banks);
return -EINVAL;
}
+ if (!gpio_chips[pin->bank]) {
+ dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
+ name, index, pin->bank);
+ return -ENXIO;
+ }
+
if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
for_each_child_of_node(np, child) {
if (of_device_is_compatible(child, gpio_compat)) {
- info->nbanks++;
+ if (of_device_is_available(child))
+ info->nactive_banks++;
} else {
info->nfunctions++;
info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
}
size /= sizeof(*list);
- if (!size || size % info->nbanks) {
- dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
+ if (!size || size % gpio_banks) {
+ dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
return -EINVAL;
}
- info->nmux = size / info->nbanks;
+ info->nmux = size / gpio_banks;
info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
at91_pinctrl_child_count(info, np);
- if (info->nbanks < 1) {
+ if (gpio_banks < 1) {
dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
return -EINVAL;
}
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
dev_dbg(&pdev->dev, "mux-mask\n");
tmp = info->mux_mask;
- for (i = 0; i < info->nbanks; i++) {
+ for (i = 0; i < gpio_banks; i++) {
for (j = 0; j < info->nmux; j++, tmp++) {
dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
}
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
if (!info->groups)
return -ENOMEM;
- dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+ dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
{
struct at91_pinctrl *info;
struct pinctrl_pin_desc *pdesc;
- int ret, i, j, k;
+ int ret, i, j, k, ngpio_chips_enabled = 0;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
* to obtain references to the struct gpio_chip * for them, and we
* need this to proceed.
*/
- for (i = 0; i < info->nbanks; i++) {
- if (!gpio_chips[i]) {
- dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
- devm_kfree(&pdev->dev, info);
- return -EPROBE_DEFER;
- }
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ ngpio_chips_enabled++;
+
+ if (ngpio_chips_enabled < info->nactive_banks) {
+ dev_warn(&pdev->dev,
+ "All GPIO chips are not registered yet (%d/%d)\n",
+ ngpio_chips_enabled, info->nactive_banks);
+ devm_kfree(&pdev->dev, info);
+ return -EPROBE_DEFER;
}
at91_pinctrl_desc.name = dev_name(&pdev->dev);
- at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+ at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
at91_pinctrl_desc.pins = pdesc =
devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
if (!at91_pinctrl_desc.pins)
return -ENOMEM;
- for (i = 0 , k = 0; i < info->nbanks; i++) {
+ for (i = 0, k = 0; i < gpio_banks; i++) {
for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
pdesc->number = k;
pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
}
/* We will handle a range of GPIO pins */
- for (i = 0; i < info->nbanks; i++)
- pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *at91_gpio)
{
+ struct gpio_chip *gpiochip_prev = NULL;
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
- int ret;
+ int ret, i;
at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
return ret;
}
- /* Setup chained handler */
- if (at91_gpio->pioc_idx)
- prev = gpio_chips[at91_gpio->pioc_idx - 1];
-
/* The top level handler handles one bank of GPIOs, except
* on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
- if (prev && prev->next == at91_gpio)
+ gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
+ if (!gpiochip_prev) {
+ /* Then register the chain on the parent IRQ */
+ gpiochip_set_chained_irqchip(&at91_gpio->chip,
+ &gpio_irqchip,
+ at91_gpio->pioc_virq,
+ gpio_irq_handler);
return 0;
+ }
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(&at91_gpio->chip,
- &gpio_irqchip,
- at91_gpio->pioc_virq,
- gpio_irq_handler);
+ prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
- return 0;
+ /* at most two further banks can share this IRQ line */
+ for (i = 0; i < 2; i++) {
+ if (prev->next) {
+ prev = prev->next;
+ } else {
+ prev->next = at91_gpio;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
/* This structure is replicated for each GPIO block allocated at probe time */
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
.ngpio = MAX_NB_GPIO_PER_BANK,
};
-static void at91_gpio_probe_fixup(void)
-{
- unsigned i;
- struct at91_gpio_chip *at91_gpio, *last = NULL;
-
- for (i = 0; i < gpio_banks; i++) {
- at91_gpio = gpio_chips[i];
-
- /*
- * GPIO controller are grouped on some SoC:
- * PIOC, PIOD and PIOE can share the same IRQ line
- */
- if (last && last->pioc_virq == at91_gpio->pioc_virq)
- last->next = at91_gpio;
- last = at91_gpio;
- }
-}
-
static struct of_device_id at91_gpio_of_match[] = {
{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
- at91_gpio_probe_fixup();
-
ret = at91_gpio_of_irq_setup(pdev, at91_chip);
if (ret)
goto irq_setup_err;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 3c22dbebc80f..43eacc924b7e 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1398,10 +1398,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_get_chip(irq);
struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
- u32 polarity = 0, data = 0;
u32 pend;
- bool edge_changed = false;
- unsigned long flags;
dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -1409,12 +1406,6 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
- if (bank->toggle_edge_mode) {
- polarity = readl_relaxed(bank->reg_base +
- GPIO_INT_POLARITY);
- data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
- }
-
while (pend) {
unsigned int virq;
@@ -1434,27 +1425,31 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
* needs manual intervention.
*/
if (bank->toggle_edge_mode & BIT(irq)) {
- if (data & BIT(irq))
- polarity &= ~BIT(irq);
- else
- polarity |= BIT(irq);
+ u32 data, data_old, polarity;
+ unsigned long flags;
- edge_changed = true;
- }
+ data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
+ do {
+ spin_lock_irqsave(&bank->slock, flags);
- generic_handle_irq(virq);
- }
+ polarity = readl_relaxed(bank->reg_base +
+ GPIO_INT_POLARITY);
+ if (data & BIT(irq))
+ polarity &= ~BIT(irq);
+ else
+ polarity |= BIT(irq);
+ writel(polarity,
+ bank->reg_base + GPIO_INT_POLARITY);
- if (bank->toggle_edge_mode && edge_changed) {
- /* Interrupt params should only be set with ints disabled */
- spin_lock_irqsave(&bank->slock, flags);
+ spin_unlock_irqrestore(&bank->slock, flags);
- data = readl_relaxed(bank->reg_base + GPIO_INTEN);
- writel_relaxed(0, bank->reg_base + GPIO_INTEN);
- writel(polarity, bank->reg_base + GPIO_INT_POLARITY);
- writel(data, bank->reg_base + GPIO_INTEN);
+ data_old = data;
+ data = readl_relaxed(bank->reg_base +
+ GPIO_EXT_PORT);
+ } while ((data & BIT(irq)) != (data_old & BIT(irq)));
+ }
- spin_unlock_irqrestore(&bank->slock, flags);
+ generic_handle_irq(virq);
}
chained_irq_exit(chip, desc);
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index c5cef59f5965..779950c62e53 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
/* load the gpio chip */
xway_chip.dev = &pdev->dev;
- of_gpiochip_add(&xway_chip);
ret = gpiochip_add(&xway_chip);
if (ret) {
- of_gpiochip_remove(&xway_chip);
dev_err(&pdev->dev, "Failed to register gpio chip\n");
return ret;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e730935fa457..ed7017df065d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -865,10 +865,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
{
- int i = 0;
+ int i;
const struct msm_function *func = pctrl->soc->functions;
- for (; i <= pctrl->soc->nfunctions; i++)
+ for (i = 0; i < pctrl->soc->nfunctions; i++)
if (!strcmp(func[i].name, "ps_hold")) {
pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
pctrl->restart_nb.priority = 128;
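The hunk above fixes a textbook off-by-one: `i <= pctrl->soc->nfunctions` makes the last iteration read func[nfunctions], one element past the end of the functions array (moving the `i = 0` into the for statement is just tidying). A compilable sketch of the corrected bound, with invented array contents:

#include <stdio.h>
#include <string.h>

struct msm_function { const char *name; };

int main(void)
{
	const struct msm_function func[] = {
		{ "gpio" }, { "ps_hold" }, { "uart" },  /* invented list */
	};
	const unsigned int nfunctions = sizeof(func) / sizeof(func[0]);
	unsigned int i;

	/* '<=' would read func[nfunctions], one past the end;
	 * strict '<' visits exactly func[0]..func[nfunctions - 1] */
	for (i = 0; i < nfunctions; i++)
		if (!strcmp(func[i].name, "ps_hold"))
			printf("ps_hold is function %u\n", i);
	return 0;
}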
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 9411eae39a4e..3d21efe11d7b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2,11 +2,9 @@
* Driver for Dell laptop extras
*
* Copyright (c) Red Hat <mjg@redhat.com>
- * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
*
- * Based on documentation in the libsmbios package:
- * Copyright (C) 2005-2014 Dell Inc.
+ * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
+ * Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -34,13 +32,6 @@
#include "../../firmware/dcdbas.h"
#define BRIGHTNESS_TOKEN 0x7d
-#define KBD_LED_OFF_TOKEN 0x01E1
-#define KBD_LED_ON_TOKEN 0x01E2
-#define KBD_LED_AUTO_TOKEN 0x01E3
-#define KBD_LED_AUTO_25_TOKEN 0x02EA
-#define KBD_LED_AUTO_50_TOKEN 0x02EB
-#define KBD_LED_AUTO_75_TOKEN 0x02EC
-#define KBD_LED_AUTO_100_TOKEN 0x02F6
/* This structure will be modified by the firmware when we enter
* system management mode, hence the volatiles */
@@ -71,13 +62,6 @@ struct calling_interface_structure {
struct quirk_entry {
u8 touchpad_led;
-
- int needs_kbd_timeouts;
- /*
- * Ordered list of timeouts expressed in seconds.
- * The list must end with -1
- */
- int kbd_timeouts[];
};
static struct quirk_entry *quirks;
@@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
return 1;
}
-/*
- * These values come from Windows utility provided by Dell. If any other value
- * is used then BIOS silently set timeout to 0 without any error message.
- */
-static struct quirk_entry quirk_dell_xps13_9333 = {
- .needs_kbd_timeouts = 1,
- .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
-};
-
static int da_command_address;
static int da_command_code;
static int da_num_tokens;
@@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_vostro_v130,
},
- {
- .callback = dmi_matched,
- .ident = "Dell XPS13 9333",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
- },
- .driver_data = &quirk_dell_xps13_9333,
- },
{ }
};
@@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int find_token_id(int tokenid)
+static int find_token_location(int tokenid)
{
int i;
-
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].tokenID == tokenid)
- return i;
+ return da_tokens[i].location;
}
return -1;
}
-static int find_token_location(int tokenid)
-{
- int id;
-
- id = find_token_id(tokenid);
- if (id == -1)
- return -1;
-
- return da_tokens[id].location;
-}
-
static struct calling_interface_buffer *
dell_send_request(struct calling_interface_buffer *buffer, int class,
int select)
@@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
-static inline int dell_smi_error(int value)
-{
- switch (value) {
- case 0: /* Completed successfully */
- return 0;
- case -1: /* Completed with error */
- return -EIO;
- case -2: /* Function not supported */
- return -ENXIO;
- default: /* Unknown error */
- return -EINVAL;
- }
-}
-
/* Derived from information in DellWirelessCtl.cpp:
Class 17, select 11 is radio control. It returns an array of 32-bit values.
@@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 1, 1);
- out:
+out:
release_buffer();
return ret;
}
@@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd)
ret = buffer->output[1];
- out:
+out:
release_buffer();
return ret;
}
@@ -849,984 +789,6 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
-/*
- * Derived from information in smbios-keyboard-ctl:
- *
- * cbClass 4
- * cbSelect 11
- * Keyboard illumination
- * cbArg1 determines the function to be performed
- *
- * cbArg1 0x0 = Get Feature Information
- * cbRES1 Standard return codes (0, -1, -2)
- * cbRES2, word0 Bitmap of user-selectable modes
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * cbRES2, byte2 Reserved for future use
- * cbRES2, byte3 Keyboard illumination type
- * 0 Reserved
- * 1 Tasklight
- * 2 Backlight
- * 3-255 Reserved for future use
- * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbRES3, byte1 Supported timeout unit bitmap
- * bit 0 Seconds
- * bit 1 Minutes
- * bit 2 Hours
- * bit 3 Days
- * bits 4-7 Reserved for future use
- * cbRES3, byte2 Number of keyboard light brightness levels
- * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
- * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
- * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
- * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
- *
- * cbArg1 0x1 = Get Current State
- * cbRES1 Standard return codes (0, -1, -2)
- * cbRES2, word0 Bitmap of current mode state
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * Note: Only one bit can be set
- * cbRES2, byte2 Currently active auto keyboard illumination triggers.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbRES2, byte3 Current Timeout
- * bits 7:6 Timeout units indicator:
- * 00b Seconds
- * 01b Minutes
- * 10b Hours
- * 11b Days
- * bits 5:0 Timeout value (0-63) in sec/min/hr/day
- * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
- * are set upon return from the [Get feature information] call.
- * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
- * cbRES3, byte1 Current ALS reading
- * cbRES3, byte2 Current keyboard light level.
- *
- * cbArg1 0x2 = Set New State
- * cbRES1 Standard return codes (0, -1, -2)
- * cbArg2, word0 Bitmap of current mode state
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * Note: Only one bit can be set
- * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
- * keyboard to turn off automatically.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbArg2, byte3 Desired Timeout
- * bits 7:6 Timeout units indicator:
- * 00b Seconds
- * 01b Minutes
- * 10b Hours
- * 11b Days
- * bits 5:0 Timeout value (0-63) in sec/min/hr/day
- * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
- * cbArg3, byte2 Desired keyboard light level.
- */
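For reference, the Set New State call described above packs its arguments into 32-bit words: the mode bitmap occupies word0's low 16 bits, the trigger byte sits in bits 16-23, the 6-bit timeout value in bits 24-29 and the 2-bit unit selector in bits 30-31, which is exactly what the removed kbd_set_state() builds below. A standalone sketch of that packing, with made-up field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mode_bit = 4;       /* trigger mode: bit 4 of the bitmap */
	uint8_t triggers = 0x01;    /* bit 0: any keystroke */
	uint8_t timeout_value = 15; /* 6-bit value, 0..63 */
	uint8_t timeout_unit = 0;   /* 00b = seconds */
	uint32_t arg2;

	arg2  = (1u << mode_bit) & 0xFFFF;              /* cbArg2, word0 */
	arg2 |= (uint32_t)(triggers & 0xFF) << 16;      /* cbArg2, byte2 */
	arg2 |= (uint32_t)(timeout_value & 0x3F) << 24; /* timeout, 5:0 */
	arg2 |= (uint32_t)(timeout_unit & 0x3) << 30;   /* units, 7:6 */
	printf("cbArg2 = %#010x\n", arg2);  /* 0x0f010010: 15 s, keystroke */
	return 0;
}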
-
-
-enum kbd_timeout_unit {
- KBD_TIMEOUT_SECONDS = 0,
- KBD_TIMEOUT_MINUTES,
- KBD_TIMEOUT_HOURS,
- KBD_TIMEOUT_DAYS,
-};
-
-enum kbd_mode_bit {
- KBD_MODE_BIT_OFF = 0,
- KBD_MODE_BIT_ON,
- KBD_MODE_BIT_ALS,
- KBD_MODE_BIT_TRIGGER_ALS,
- KBD_MODE_BIT_TRIGGER,
- KBD_MODE_BIT_TRIGGER_25,
- KBD_MODE_BIT_TRIGGER_50,
- KBD_MODE_BIT_TRIGGER_75,
- KBD_MODE_BIT_TRIGGER_100,
-};
-
-#define kbd_is_als_mode_bit(bit) \
- ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
-#define kbd_is_trigger_mode_bit(bit) \
- ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-#define kbd_is_level_mode_bit(bit) \
- ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-
-struct kbd_info {
- u16 modes;
- u8 type;
- u8 triggers;
- u8 levels;
- u8 seconds;
- u8 minutes;
- u8 hours;
- u8 days;
-};
-
-struct kbd_state {
- u8 mode_bit;
- u8 triggers;
- u8 timeout_value;
- u8 timeout_unit;
- u8 als_setting;
- u8 als_value;
- u8 level;
-};
-
-static const int kbd_tokens[] = {
- KBD_LED_OFF_TOKEN,
- KBD_LED_AUTO_25_TOKEN,
- KBD_LED_AUTO_50_TOKEN,
- KBD_LED_AUTO_75_TOKEN,
- KBD_LED_AUTO_100_TOKEN,
- KBD_LED_ON_TOKEN,
-};
-
-static u16 kbd_token_bits;
-
-static struct kbd_info kbd_info;
-static bool kbd_als_supported;
-static bool kbd_triggers_supported;
-
-static u8 kbd_mode_levels[16];
-static int kbd_mode_levels_count;
-
-static u8 kbd_previous_level;
-static u8 kbd_previous_mode_bit;
-
-static bool kbd_led_present;
-
-/*
- * NOTE: there are three ways to set the keyboard backlight level.
- * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
- * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
- * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
- *
- * There are laptops which support only one of these methods. If we want to
- * support as many machines as possible we need to implement all three methods.
- * The first two methods use the kbd_state structure. The third uses SMBIOS
- * tokens. If kbd_info.levels == 0, the machine does not support setting the
- * keyboard backlight level via kbd_state.level.
- */
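Condensed, the fallback order this note implies is: use kbd_state.level when the firmware reports kbd_info.levels > 0, otherwise map levels onto the KBD_MODE_BIT_TRIGGER_* mode bits, and only then fall back to the SMBIOS tokens. A small sketch of that dispatch (variable names mirror the driver's globals, values invented):

#include <stdio.h>

static int levels;             /* kbd_info.levels */
static int mode_levels_count;  /* kbd_mode_levels_count */
static int token_count;        /* hweight16(kbd_token_bits) */

static const char *level_method(void)
{
	if (levels)
		return "kbd_state.level";
	if (mode_levels_count > 0)
		return "KBD_MODE_BIT_TRIGGER_* mode bits";
	if (token_count > 0)
		return "SMBIOS KBD_LED_* tokens";
	return "no backlight level control";
}

int main(void)
{
	levels = 0;
	mode_levels_count = 5;  /* invented: five usable mode bits */
	token_count = 0;
	printf("method: %s\n", level_method());
	return 0;
}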
-
-static int kbd_get_info(struct kbd_info *info)
-{
- u8 units;
- int ret;
-
- get_buffer();
-
- buffer->input[0] = 0x0;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smi_error(ret);
- goto out;
- }
-
- info->modes = buffer->output[1] & 0xFFFF;
- info->type = (buffer->output[1] >> 24) & 0xFF;
- info->triggers = buffer->output[2] & 0xFF;
- units = (buffer->output[2] >> 8) & 0xFF;
- info->levels = (buffer->output[2] >> 16) & 0xFF;
-
- if (units & BIT(0))
- info->seconds = (buffer->output[3] >> 0) & 0xFF;
- if (units & BIT(1))
- info->minutes = (buffer->output[3] >> 8) & 0xFF;
- if (units & BIT(2))
- info->hours = (buffer->output[3] >> 16) & 0xFF;
- if (units & BIT(3))
- info->days = (buffer->output[3] >> 24) & 0xFF;
-
- out:
- release_buffer();
- return ret;
-}
-
-static unsigned int kbd_get_max_level(void)
-{
- if (kbd_info.levels != 0)
- return kbd_info.levels;
- if (kbd_mode_levels_count > 0)
- return kbd_mode_levels_count - 1;
- return 0;
-}
-
-static int kbd_get_level(struct kbd_state *state)
-{
- int i;
-
- if (kbd_info.levels != 0)
- return state->level;
-
- if (kbd_mode_levels_count > 0) {
- for (i = 0; i < kbd_mode_levels_count; ++i)
- if (kbd_mode_levels[i] == state->mode_bit)
- return i;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int kbd_set_level(struct kbd_state *state, u8 level)
-{
- if (kbd_info.levels != 0) {
- if (level != 0)
- kbd_previous_level = level;
- if (state->level == level)
- return 0;
- state->level = level;
- if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
- state->mode_bit = kbd_previous_mode_bit;
- else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
- kbd_previous_mode_bit = state->mode_bit;
- state->mode_bit = KBD_MODE_BIT_OFF;
- }
- return 0;
- }
-
- if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
- if (level != 0)
- kbd_previous_level = level;
- state->mode_bit = kbd_mode_levels[level];
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int kbd_get_state(struct kbd_state *state)
-{
- int ret;
-
- get_buffer();
-
- buffer->input[0] = 0x1;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smi_error(ret);
- goto out;
- }
-
- state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
- if (state->mode_bit != 0)
- state->mode_bit--;
-
- state->triggers = (buffer->output[1] >> 16) & 0xFF;
- state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
- state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
- state->als_setting = buffer->output[2] & 0xFF;
- state->als_value = (buffer->output[2] >> 8) & 0xFF;
- state->level = (buffer->output[2] >> 16) & 0xFF;
-
- out:
- release_buffer();
- return ret;
-}
-
-static int kbd_set_state(struct kbd_state *state)
-{
- int ret;
-
- get_buffer();
- buffer->input[0] = 0x2;
- buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
- buffer->input[1] |= (state->triggers & 0xFF) << 16;
- buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
- buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
- buffer->input[2] = state->als_setting & 0xFF;
- buffer->input[2] |= (state->level & 0xFF) << 16;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
- release_buffer();
-
- return dell_smi_error(ret);
-}
-
-static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
-{
- int ret;
-
- ret = kbd_set_state(state);
- if (ret == 0)
- return 0;
-
- /*
- * When setting the new state fails, try to restore the previous one.
- * This is needed on some machines where BIOS sets a default state when
- * setting a new state fails. This default state could be all off.
- */
-
- if (kbd_set_state(old))
- pr_err("Setting old previous keyboard state failed\n");
-
- return ret;
-}
-
-static int kbd_set_token_bit(u8 bit)
-{
- int id;
- int ret;
-
- if (bit >= ARRAY_SIZE(kbd_tokens))
- return -EINVAL;
-
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
- return -EINVAL;
-
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- buffer->input[1] = da_tokens[id].value;
- dell_send_request(buffer, 1, 0);
- ret = buffer->output[0];
- release_buffer();
-
- return dell_smi_error(ret);
-}
-
-static int kbd_get_token_bit(u8 bit)
-{
- int id;
- int ret;
- int val;
-
- if (bit >= ARRAY_SIZE(kbd_tokens))
- return -EINVAL;
-
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
- return -EINVAL;
-
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- dell_send_request(buffer, 0, 0);
- ret = buffer->output[0];
- val = buffer->output[1];
- release_buffer();
-
- if (ret)
- return dell_smi_error(ret);
-
- return (val == da_tokens[id].value);
-}
-
-static int kbd_get_first_active_token_bit(void)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
- ret = kbd_get_token_bit(i);
- if (ret == 1)
- return i;
- }
-
- return ret;
-}
-
-static int kbd_get_valid_token_counts(void)
-{
- return hweight16(kbd_token_bits);
-}
-
-static inline int kbd_init_info(void)
-{
- struct kbd_state state;
- int ret;
- int i;
-
- ret = kbd_get_info(&kbd_info);
- if (ret)
- return ret;
-
- kbd_get_state(&state);
-
- /* NOTE: timeout value is stored in 6 bits so max value is 63 */
- if (kbd_info.seconds > 63)
- kbd_info.seconds = 63;
- if (kbd_info.minutes > 63)
- kbd_info.minutes = 63;
- if (kbd_info.hours > 63)
- kbd_info.hours = 63;
- if (kbd_info.days > 63)
- kbd_info.days = 63;
-
- /* NOTE: On tested machines the ON mode did not work and caused
- * problems (it turned the backlight off), so do not use it
- */
- kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
-
- kbd_previous_level = kbd_get_level(&state);
- kbd_previous_mode_bit = state.mode_bit;
-
- if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
- kbd_previous_level = 1;
-
- if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
- kbd_previous_mode_bit =
- ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
- if (kbd_previous_mode_bit != 0)
- kbd_previous_mode_bit--;
- }
-
- if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
- BIT(KBD_MODE_BIT_TRIGGER_ALS)))
- kbd_als_supported = true;
-
- if (kbd_info.modes & (
- BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
- BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
- BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
- ))
- kbd_triggers_supported = true;
-
- /* kbd_mode_levels[0] is reserved, see below */
- for (i = 0; i < 16; ++i)
- if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
- kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
-
- /*
- * Find the first supported mode and assign to kbd_mode_levels[0].
- * This should be 0 (off), but we cannot depend on the BIOS to
- * support 0.
- */
- if (kbd_mode_levels_count > 0) {
- for (i = 0; i < 16; ++i) {
- if (BIT(i) & kbd_info.modes) {
- kbd_mode_levels[0] = i;
- break;
- }
- }
- kbd_mode_levels_count++;
- }
-
- return 0;
-
-}
-
-static inline void kbd_init_tokens(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
- if (find_token_id(kbd_tokens[i]) != -1)
- kbd_token_bits |= BIT(i);
-}
-
-static void kbd_init(void)
-{
- int ret;
-
- ret = kbd_init_info();
- kbd_init_tokens();
-
- if (kbd_token_bits != 0 || ret == 0)
- kbd_led_present = true;
-}
-
-static ssize_t kbd_led_timeout_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state new_state;
- struct kbd_state state;
- bool convert;
- int value;
- int ret;
- char ch;
- u8 unit;
- int i;
-
- ret = sscanf(buf, "%d %c", &value, &ch);
- if (ret < 1)
- return -EINVAL;
- else if (ret == 1)
- ch = 's';
-
- if (value < 0)
- return -EINVAL;
-
- convert = false;
-
- switch (ch) {
- case 's':
- if (value > kbd_info.seconds)
- convert = true;
- unit = KBD_TIMEOUT_SECONDS;
- break;
- case 'm':
- if (value > kbd_info.minutes)
- convert = true;
- unit = KBD_TIMEOUT_MINUTES;
- break;
- case 'h':
- if (value > kbd_info.hours)
- convert = true;
- unit = KBD_TIMEOUT_HOURS;
- break;
- case 'd':
- if (value > kbd_info.days)
- convert = true;
- unit = KBD_TIMEOUT_DAYS;
- break;
- default:
- return -EINVAL;
- }
-
- if (quirks && quirks->needs_kbd_timeouts)
- convert = true;
-
- if (convert) {
- /* Convert value from current units to seconds */
- switch (unit) {
- case KBD_TIMEOUT_DAYS:
- value *= 24;
- case KBD_TIMEOUT_HOURS:
- value *= 60;
- case KBD_TIMEOUT_MINUTES:
- value *= 60;
- unit = KBD_TIMEOUT_SECONDS;
- }
-
- if (quirks && quirks->needs_kbd_timeouts) {
- for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
- if (value <= quirks->kbd_timeouts[i]) {
- value = quirks->kbd_timeouts[i];
- break;
- }
- }
- }
-
- if (value <= kbd_info.seconds && kbd_info.seconds) {
- unit = KBD_TIMEOUT_SECONDS;
- } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
- value /= 60;
- unit = KBD_TIMEOUT_MINUTES;
- } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
- value /= (60 * 60);
- unit = KBD_TIMEOUT_HOURS;
- } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
- value /= (60 * 60 * 24);
- unit = KBD_TIMEOUT_DAYS;
- } else {
- return -EINVAL;
- }
- }
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- new_state = state;
- new_state.timeout_value = value;
- new_state.timeout_unit = unit;
-
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
-
- return count;
-}
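Note the deliberate fallthrough in the conversion switch above: days multiply into hours, hours into minutes and minutes into seconds, so any input is first normalized to seconds and then re-quantized into the largest unit whose firmware maximum it fits. Worked through standalone, with the firmware limits invented:

#include <stdio.h>

int main(void)
{
	int max_seconds = 63, max_minutes = 63;  /* invented firmware maxima */
	int value = 2;                           /* user wrote "2 d" */

	value *= 24 * 60 * 60;                   /* 2 days -> 172800 s */

	if (value <= max_seconds)
		printf("%d s\n", value);
	else if (value / 60 <= max_minutes)
		printf("%d m\n", value / 60);
	else
		printf("%d h\n", value / 3600);  /* prints "48 h" */
	return 0;
}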
-
-static ssize_t kbd_led_timeout_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- int ret;
- int len;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- len = sprintf(buf, "%d", state.timeout_value);
-
- switch (state.timeout_unit) {
- case KBD_TIMEOUT_SECONDS:
- return len + sprintf(buf+len, "s\n");
- case KBD_TIMEOUT_MINUTES:
- return len + sprintf(buf+len, "m\n");
- case KBD_TIMEOUT_HOURS:
- return len + sprintf(buf+len, "h\n");
- case KBD_TIMEOUT_DAYS:
- return len + sprintf(buf+len, "d\n");
- default:
- return -EINVAL;
- }
-
- return len;
-}
-
-static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
- kbd_led_timeout_show, kbd_led_timeout_store);
-
-static const char * const kbd_led_triggers[] = {
- "keyboard",
- "touchpad",
- /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
- "mouse",
-};
-
-static ssize_t kbd_led_triggers_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state new_state;
- struct kbd_state state;
- bool triggers_enabled = false;
- bool als_enabled = false;
- bool disable_als = false;
- bool enable_als = false;
- int trigger_bit = -1;
- char trigger[21];
- int i, ret;
-
- ret = sscanf(buf, "%20s", trigger);
- if (ret != 1)
- return -EINVAL;
-
- if (trigger[0] != '+' && trigger[0] != '-')
- return -EINVAL;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- if (kbd_als_supported)
- als_enabled = kbd_is_als_mode_bit(state.mode_bit);
-
- if (kbd_triggers_supported)
- triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
-
- if (kbd_als_supported) {
- if (strcmp(trigger, "+als") == 0) {
- if (als_enabled)
- return count;
- enable_als = true;
- } else if (strcmp(trigger, "-als") == 0) {
- if (!als_enabled)
- return count;
- disable_als = true;
- }
- }
-
- if (enable_als || disable_als) {
- new_state = state;
- if (enable_als) {
- if (triggers_enabled)
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
- else
- new_state.mode_bit = KBD_MODE_BIT_ALS;
- } else {
- if (triggers_enabled) {
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
- kbd_set_level(&new_state, kbd_previous_level);
- } else {
- new_state.mode_bit = KBD_MODE_BIT_ON;
- }
- }
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
- kbd_previous_mode_bit = new_state.mode_bit;
- return count;
- }
-
- if (kbd_triggers_supported) {
- for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
- if (!(kbd_info.triggers & BIT(i)))
- continue;
- if (!kbd_led_triggers[i])
- continue;
- if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
- continue;
- if (trigger[0] == '+' &&
- triggers_enabled && (state.triggers & BIT(i)))
- return count;
- if (trigger[0] == '-' &&
- (!triggers_enabled || !(state.triggers & BIT(i))))
- return count;
- trigger_bit = i;
- break;
- }
- }
-
- if (trigger_bit != -1) {
- new_state = state;
- if (trigger[0] == '+')
- new_state.triggers |= BIT(trigger_bit);
- else {
- new_state.triggers &= ~BIT(trigger_bit);
- /* NOTE: trackstick bit (2) must be disabled when
- * disabling touchpad bit (1), otherwise touchpad
- * bit (1) will not be disabled */
- if (trigger_bit == 1)
- new_state.triggers &= ~BIT(2);
- }
- if ((kbd_info.triggers & new_state.triggers) !=
- new_state.triggers)
- return -EINVAL;
- if (new_state.triggers && !triggers_enabled) {
- if (als_enabled)
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
- else {
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
- kbd_set_level(&new_state, kbd_previous_level);
- }
- } else if (new_state.triggers == 0) {
- if (als_enabled)
- new_state.mode_bit = KBD_MODE_BIT_ALS;
- else
- kbd_set_level(&new_state, 0);
- }
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
- if (new_state.mode_bit != KBD_MODE_BIT_OFF)
- kbd_previous_mode_bit = new_state.mode_bit;
- return count;
- }
-
- return -EINVAL;
-}
-
-static ssize_t kbd_led_triggers_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- bool triggers_enabled;
- int level, i, ret;
- int len = 0;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- len = 0;
-
- if (kbd_triggers_supported) {
- triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
- level = kbd_get_level(&state);
- for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
- if (!(kbd_info.triggers & BIT(i)))
- continue;
- if (!kbd_led_triggers[i])
- continue;
- if ((triggers_enabled || level <= 0) &&
- (state.triggers & BIT(i)))
- buf[len++] = '+';
- else
- buf[len++] = '-';
- len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
- }
- }
-
- if (kbd_als_supported) {
- if (kbd_is_als_mode_bit(state.mode_bit))
- len += sprintf(buf+len, "+als ");
- else
- len += sprintf(buf+len, "-als ");
- }
-
- if (len)
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
- kbd_led_triggers_show, kbd_led_triggers_store);
-
-static ssize_t kbd_led_als_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state state;
- struct kbd_state new_state;
- u8 setting;
- int ret;
-
- ret = kstrtou8(buf, 10, &setting);
- if (ret)
- return ret;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- new_state = state;
- new_state.als_setting = setting;
-
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
-
- return count;
-}
-
-static ssize_t kbd_led_als_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- int ret;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", state.als_setting);
-}
-
-static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
- kbd_led_als_show, kbd_led_als_store);
-
-static struct attribute *kbd_led_attrs[] = {
- &dev_attr_stop_timeout.attr,
- &dev_attr_start_triggers.attr,
- &dev_attr_als_setting.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(kbd_led);
-
-static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
-{
- int ret;
- u16 num;
- struct kbd_state state;
-
- if (kbd_get_max_level()) {
- ret = kbd_get_state(&state);
- if (ret)
- return 0;
- ret = kbd_get_level(&state);
- if (ret < 0)
- return 0;
- return ret;
- }
-
- if (kbd_get_valid_token_counts()) {
- ret = kbd_get_first_active_token_bit();
- if (ret < 0)
- return 0;
- for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
- num &= num - 1; /* clear the first bit set */
- if (num == 0)
- return 0;
- return ffs(num) - 1;
- }
-
- pr_warn("Keyboard brightness level control not supported\n");
- return 0;
-}
-
-static void kbd_led_level_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct kbd_state state;
- struct kbd_state new_state;
- u16 num;
-
- if (kbd_get_max_level()) {
- if (kbd_get_state(&state))
- return;
- new_state = state;
- if (kbd_set_level(&new_state, value))
- return;
- kbd_set_state_safe(&new_state, &state);
- return;
- }
-
- if (kbd_get_valid_token_counts()) {
- for (num = kbd_token_bits; num != 0 && value > 0; --value)
- num &= num - 1; /* clear the first bit set */
- if (num == 0)
- return;
- kbd_set_token_bit(ffs(num) - 1);
- return;
- }
-
- pr_warn("Keyboard brightness level control not supported\n");
-}
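Both level helpers above convert between a brightness index and the n-th set bit of kbd_token_bits using two classic bit tricks: `num &= num - 1` clears the lowest set bit, and `ffs(num) - 1` yields the position of the lowest bit still set. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>  /* ffs() */

int main(void)
{
	uint16_t token_bits = 0x2B;  /* bits 0, 1, 3, 5 set */
	int index = 2;               /* want the set bit at index 2 */
	uint16_t num = token_bits;

	while (num && index > 0) {
		num &= num - 1;      /* drop the lowest set bit */
		index--;
	}
	if (num)
		printf("bit %d\n", ffs(num) - 1);  /* prints 3 */
	return 0;
}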
-
-static struct led_classdev kbd_led = {
- .name = "dell::kbd_backlight",
- .brightness_set = kbd_led_level_set,
- .brightness_get = kbd_led_level_get,
- .groups = kbd_led_groups,
-};
-
-static int __init kbd_led_init(struct device *dev)
-{
- kbd_init();
- if (!kbd_led_present)
- return -ENODEV;
- kbd_led.max_brightness = kbd_get_max_level();
- if (!kbd_led.max_brightness) {
- kbd_led.max_brightness = kbd_get_valid_token_counts();
- if (kbd_led.max_brightness)
- kbd_led.max_brightness--;
- }
- return led_classdev_register(dev, &kbd_led);
-}
-
-static void brightness_set_exit(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- /* Don't change backlight level on exit */
-};
-
-static void kbd_led_exit(void)
-{
- if (!kbd_led_present)
- return;
- kbd_led.brightness_set = brightness_set_exit;
- led_classdev_unregister(&kbd_led);
-}
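The exit path above swaps in a no-op brightness_set before unregistering because the LED core turns a device off through that callback during led_classdev_unregister(); with the stub installed, unloading the module leaves the backlight at whatever level the user chose. A function-pointer sketch of the pattern:

#include <stdio.h>

struct led { void (*set)(int value); };

static void hw_set(int value)   { printf("hw level -> %d\n", value); }
static void set_noop(int value) { (void)value; /* keep current level */ }

/* models led_classdev_unregister(), which switches the LED off
 * through the brightness_set callback */
static void unregister(struct led *led)
{
	led->set(0);
}

int main(void)
{
	struct led kbd = { .set = hw_set };

	kbd.set(2);          /* user-chosen level... */
	kbd.set = set_noop;  /* ...survives because the setter is stubbed */
	unregister(&kbd);    /* prints nothing */
	return 0;
}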
-
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -1879,8 +841,6 @@ static int __init dell_init(void)
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
- kbd_led_init(&platform_device->dev);
-
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
if (dell_laptop_dir != NULL)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -1948,7 +908,6 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
- kbd_led_exit();
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
@@ -1965,7 +924,5 @@ module_init(dell_init);
module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e225711bb8bc..9c48fb32f660 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(regulator_get_optional);
-/* Locks held by regulator_put() */
+/* regulator_list_mutex lock held by regulator_put() */
static void _regulator_put(struct regulator *regulator)
{
struct regulator_dev *rdev;
@@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator)
/* remove any sysfs entries */
if (regulator->dev)
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+ mutex_lock(&rdev->mutex);
kfree(regulator->supply_name);
list_del(&regulator->list);
kfree(regulator);
rdev->open_count--;
rdev->exclusive = 0;
+ mutex_unlock(&rdev->mutex);
module_put(rdev->owner);
}
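The regulator hunk above widens the rdev->mutex critical section so that unlinking the consumer and dropping open_count/exclusive cannot race with a concurrent regulator_get() against the same rdev. Reduced to a pthread sketch (lock name and fields are stand-ins for the kernel originals):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rdev_mutex = PTHREAD_MUTEX_INITIALIZER;
static int open_count = 1, exclusive = 1;

static void regulator_put_sketch(void)
{
	pthread_mutex_lock(&rdev_mutex);
	/* the list unlink and the refcount drop are now atomic with
	 * respect to a concurrent regulator_get() on the same rdev */
	open_count--;
	exclusive = 0;
	pthread_mutex_unlock(&rdev_mutex);
}

int main(void)
{
	regulator_put_sketch();
	printf("open_count=%d exclusive=%d\n", open_count, exclusive);
	return 0;
}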
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 2809ae0d6bcd..ff828117798f 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops;
.enable_mask = S2MPS14_ENABLE_MASK \
}
+#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS13_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .linear_min_sel = min_sel, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
+#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS13_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .linear_min_sel = min_sel, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C),
@@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10),
- regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10),
- regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
- regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
- regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10),
+ regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10),
+ regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
+ regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
+ regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10),
};
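The two new macros exist because the register stride changes within the buck range: BUCK7's enable register is still at S2MPS13_REG_B1CTRL + (num - 1) * 2, while BUCK8-BUCK10 sit one register later, at B1CTRL + num * 2 - 1; the voltage-select register is B1OUT + num * 2 - 1 in both cases. A quick check of the resulting offsets, using invented base addresses purely for illustration:

#include <stdio.h>

#define B1CTRL 0x20  /* invented base addresses, illustration only */
#define B1OUT  0x21

int main(void)
{
	int num;

	for (num = 7; num <= 10; num++) {
		int enable = (num == 7) ? B1CTRL + (num - 1) * 2
					: B1CTRL + num * 2 - 1;

		printf("BUCK%d: vsel=%#x enable=%#x\n",
		       num, B1OUT + num * 2 - 1, enable);
	}
	return 0;
}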
static int s2mps14_regulator_enable(struct regulator_dev *rdev)
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index b5e7c4670205..89ac1d5083c6 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", S5M8767X },
{ "s2mps14-rtc", S2MPS14X },
+ { },
};
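The added empty entry matters because platform_device_id tables are scanned until an entry with an empty name is reached; without the sentinel the match loop walks off the end of the array. A simplified standalone model of that scan (the kernel stores name as a fixed array and tests name[0]; this sketch uses a pointer):

#include <stdio.h>
#include <string.h>

struct platform_device_id { const char *name; unsigned long driver_data; };

static const struct platform_device_id s5m_rtc_id[] = {
	{ "s5m-rtc",     0 },
	{ "s2mps14-rtc", 1 },
	{ NULL, 0 },                  /* sentinel: the scan stops here */
};

int main(void)
{
	const struct platform_device_id *id;

	for (id = s5m_rtc_id; id->name; id++)  /* relies on the sentinel */
		if (!strcmp(id->name, "s2mps14-rtc"))
			printf("matched, driver_data=%lu\n",
			       id->driver_data);
	return 0;
}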
static struct platform_driver s5m_rtc_driver = {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f407e3763432..642c77c76b84 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
QETH_DBF_TEXT(SETUP, 2, "idxanswr");
card = CARD_FROM_CDEV(channel->ccwdev);
iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
iob->callback = idx_reply_cb;
memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
channel->ccw.count = QETH_BUFSIZE;
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
QETH_DBF_TEXT(SETUP, 2, "idxactch");
iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
iob->callback = idx_reply_cb;
memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
channel->ccw.count = IDX_ACTIVATE_SIZE;
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
+/**
+ * qeth_send_control_data() - send control command to the card
+ * @card: qeth_card structure pointer
+ * @len: size of the command buffer
+ * @iob: qeth_cmd_buffer pointer
+ * @reply_cb: callback function pointer
+ * @cb_card: pointer to the qeth_card structure
+ * @cb_reply: pointer to the qeth_reply structure
+ * @cb_cmd: pointer to the original iob for non-IPA
+ * commands, or to the qeth_ipa_cmd structure
+ * for IPA commands.
+ * @reply_param: private pointer passed to the callback
+ *
+ * Returns the value of the 'return_code' field of the response
+ * block returned by the hardware, or another error indication.
+ * A value of zero indicates that the command completed
+ * successfully.
+ *
+ * The callback function is called one or more times, with cb_cmd
+ * pointing to the response returned by the hardware. It must
+ * return non-zero if more reply blocks are expected, and zero once
+ * the last or only reply block has been received. The callback can
+ * obtain reply_param from the 'param' field of struct qeth_reply.
+ */
+
int qeth_send_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
- unsigned long),
+ int (*reply_cb)(struct qeth_card *cb_card,
+ struct qeth_reply *cb_reply,
+ unsigned long cb_cmd),
void *reply_param)
{
int rc;
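Under the contract documented above, a typical reply_cb recovers its private state from cb_reply->param and keeps returning non-zero until the final reply block arrives. A self-contained sketch, with the struct layouts reduced to the fields the callback touches (not the real qeth definitions):

#include <stdio.h>

/* reduced stand-ins, not the real qeth definitions */
struct qeth_card;
struct qeth_reply { void *param; };
struct qeth_ipa_cmd { int return_code; int last; };

static int my_reply_cb(struct qeth_card *cb_card,
		       struct qeth_reply *cb_reply,
		       unsigned long cb_cmd)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)cb_cmd;
	int *blocks_seen = cb_reply->param;  /* the caller's reply_param */

	(void)cb_card;
	(*blocks_seen)++;
	if (cmd->return_code)
		return 0;           /* error: no further blocks expected */
	return !cmd->last;          /* non-zero while more blocks follow */
}

int main(void)
{
	struct qeth_ipa_cmd first = { 0, 0 }, final = { 0, 1 };
	struct qeth_reply reply;
	int seen = 0;

	reply.param = &seen;
	my_reply_cb(0, &reply, (unsigned long)&first);  /* returns 1 */
	my_reply_cb(0, &reply, (unsigned long)&final);  /* returns 0 */
	printf("blocks seen: %d\n", seen);
	return 0;
}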
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- iob = qeth_wait_for_buffer(&card->write);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+ iob = qeth_get_buffer(&card->write);
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+ } else {
+ dev_warn(&card->gdev->dev,
+ "The qeth driver ran out of channel command buffers\n");
+ QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
+ dev_name(&card->gdev->dev));
+ }
return iob;
}
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+/**
+ * qeth_send_ipa_cmd() - send an IPA command
+ *
+ * See qeth_send_control_data() for explanation of the arguments.
+ */
+
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "strtlan");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
QETH_PROT_IPV4);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
- cmd->data.setadapterparms.hdr.command_code = command;
- cmd->data.setadapterparms.hdr.used_total = 1;
- cmd->data.setadapterparms.hdr.seq_no = 1;
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+ cmd->data.setadapterparms.hdr.command_code = command;
+ cmd->data.setadapterparms.hdr.used_total = 1;
+ cmd->data.setadapterparms.hdr.seq_no = 1;
+ }
return iob;
}
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
return rc;
}
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
return rc;
}
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
return -ENOMEDIUM;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ if (!iob)
+ return -ENOMEM;
return qeth_send_ipa_cmd(card, iob,
qeth_query_switch_attributes_cb, sw_info);
}
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "qdiagass");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 16;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
QETH_DBF_TEXT(SETUP, 2, "diagtrap");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 80;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return;
cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.mode = mode;
qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
sizeof(struct qeth_ipacmd_setadpparms_hdr) +
sizeof(struct qeth_set_access_ctrl));
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
QETH_SNMP_SETADP_CMDLENGTH + req_len);
+ if (!iob) {
+ rc = -ENOMEM;
+ goto out;
+ }
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
}
-
+out:
kfree(ureq);
kfree(qinfo.udata);
return rc;
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
sizeof(struct qeth_ipacmd_setadpparms_hdr) +
sizeof(struct qeth_query_oat));
+ if (!iob) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
oat_req = &cmd->data.setadapterparms.data.query_oat;
oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
return -EOPNOTSUPP;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ if (!iob)
+ return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
(void *)carrier_info);
}
@@ -5060,11 +5133,23 @@ retriable:
card->options.adp.supported_funcs = 0;
card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
- qeth_query_ipassists(card, QETH_PROT_IPV4);
- if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
- qeth_query_setadapterparms(card);
- if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
- qeth_query_setdiagass(card);
+ rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
+ if (rc == -ENOMEM)
+ goto out;
+ if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+ rc = qeth_query_setadapterparms(card);
+ if (rc < 0) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ goto out;
+ }
+ }
+ if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+ rc = qeth_query_setdiagass(card);
+ if (rc < 0) {
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ goto out;
+ }
+ }
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d02cd1a67943..ce87ae72edbd 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
- enum qeth_ipa_cmds,
- int (*reply_cb) (struct qeth_card *,
- struct qeth_reply*,
- unsigned long));
+ enum qeth_ipa_cmds);
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
return ndev;
}
-static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
{
- struct qeth_ipa_cmd *cmd;
- __u8 *mac;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Sgmacb");
- cmd = (struct qeth_ipa_cmd *) data;
- mac = &cmd->data.setdelmac.mac[0];
- /* MAC already registered, needed in couple/uncouple case */
- if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
- QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
- mac, QETH_CARD_IFNAME(card));
- cmd->hdr.return_code = 0;
+ if (retcode)
+ QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+ switch (retcode) {
+ case IPA_RC_SUCCESS:
+ rc = 0;
+ break;
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ rc = -ENOSYS;
+ break;
+ case IPA_RC_L2_ADDR_TABLE_FULL:
+ rc = -ENOSPC;
+ break;
+ case IPA_RC_L2_DUP_MAC:
+ case IPA_RC_L2_DUP_LAYER3_MAC:
+ rc = -EEXIST;
+ break;
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ rc = -EPERM;
+ break;
+ case IPA_RC_L2_MAC_NOT_FOUND:
+ rc = -ENOENT;
+ break;
+ case -ENOMEM:
+ rc = -ENOMEM;
+ break;
+ default:
+ rc = -EIO;
+ break;
}
- if (cmd->hdr.return_code)
- QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
- mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- return 0;
+ return rc;
}
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_CARD_TEXT(card, 2, "L2Sgmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
- qeth_l2_send_setgroupmac_cb);
-}
-
-static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
-{
- struct qeth_ipa_cmd *cmd;
- __u8 *mac;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Dgmacb");
- cmd = (struct qeth_ipa_cmd *) data;
- mac = &cmd->data.setdelmac.mac[0];
- if (cmd->hdr.return_code)
- QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
- mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- return 0;
+ QETH_CARD_TEXT(card, 2, "L2Sgmac");
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_SETGMAC));
+ if (rc == -EEXIST)
+ QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
+ mac, QETH_CARD_IFNAME(card));
+ else if (rc)
+ QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
+ return rc;
}
static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "L2Dgmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
- qeth_l2_send_delgroupmac_cb);
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_DELGMAC));
+ if (rc)
+ QETH_DBF_MESSAGE(2,
+ "Could not delete group MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
+ return rc;
}
static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
mc->is_vmac = vmac;
if (vmac) {
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
- NULL);
+ rc = qeth_setdel_makerc(card,
+ qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
} else {
- rc = qeth_l2_send_setgroupmac(card, mac);
+ rc = qeth_setdel_makerc(card,
+ qeth_l2_send_setgroupmac(card, mac));
}
if (!rc)
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
if (del) {
if (mc->is_vmac)
qeth_l2_send_setdelmac(card, mc->mc_addr,
- IPA_CMD_DELVMAC, NULL);
+ IPA_CMD_DELVMAC);
else
qeth_l2_send_delgroupmac(card, mc->mc_addr);
}
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelvlan.vlan_id = i;
return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
+ int rc;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
if (id) {
id->vid = vid;
- qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+ rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+ if (rc) {
+ kfree(id);
+ return rc;
+ }
spin_lock_bh(&card->vlanlock);
list_add_tail(&id->list, &card->vid_list);
spin_unlock_bh(&card->vlanlock);
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
{
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
+ int rc = 0;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
}
spin_unlock_bh(&card->vlanlock);
if (tmpid) {
- qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+ rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
kfree(tmpid);
}
qeth_l2_set_multicast_list(card->dev);
- return 0;
+ return rc;
}
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +560,62 @@ out:
}
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
- enum qeth_ipa_cmds ipacmd,
- int (*reply_cb) (struct qeth_card *,
- struct qeth_reply*,
- unsigned long))
+ enum qeth_ipa_cmds ipacmd)
{
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "L2sdmac");
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
- return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
+ return qeth_send_ipa_cmd(card, iob, NULL, NULL);
}
-static int qeth_l2_send_setmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
- struct qeth_ipa_cmd *cmd;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Smaccb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_SETVMAC));
+ if (rc == 0) {
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+ dev_info(&card->gdev->dev,
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
+ } else {
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- switch (cmd->hdr.return_code) {
- case IPA_RC_L2_DUP_MAC:
- case IPA_RC_L2_DUP_LAYER3_MAC:
+ switch (rc) {
+ case -EEXIST:
dev_warn(&card->gdev->dev,
- "MAC address %pM already exists\n",
- cmd->data.setdelmac.mac);
+ "MAC address %pM already exists\n", mac);
break;
- case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
- case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ case -EPERM:
dev_warn(&card->gdev->dev,
- "MAC address %pM is not authorized\n",
- cmd->data.setdelmac.mac);
- break;
- default:
+ "MAC address %pM is not authorized\n", mac);
break;
}
- } else {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
- OSA_ADDR_LEN);
- dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
- }
- return 0;
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
- QETH_CARD_TEXT(card, 2, "L2Setmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
- qeth_l2_send_setmac_cb);
-}
-
-static int qeth_l2_send_delmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
-{
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 2, "L2Dmaccb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
- return 0;
}
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-
- return 0;
+ return rc;
}
static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "L2Delmac");
if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
return 0;
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
- qeth_l2_send_delmac_cb);
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_DELVMAC));
+ if (rc == 0)
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ return rc;
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
if (rc) {
QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
"device %s: x%x\n", CARD_BUS_ID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
return rc;
}
QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
@@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
+ if (!rc || (rc == -ENOENT))
rc = qeth_l2_send_setmac(card, addr->sa_data);
return rc ? -EINVAL : 0;
}
@@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "brqsuppo");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr) +
@@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr);
@@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
if (rc)
return rc;
rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
- if (rc)
- return rc;
- return 0;
+ return rc;
}
EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
@@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
if (!(card->options.sbp.supported_funcs & setcmd))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength = cmdlength;
cmd->data.sbp.hdr.command_code = setcmd;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 625227ad16ee..e2a0ee845399 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setdelmc");
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if (addr->proto == QETH_PROT_IPV6) {
memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setroutg");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setrtg.type = (type);
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
QETH_CARD_TEXT(card, 4, "getasscm");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setassparms.hdr.assist_no = ipa_func;
- cmd->data.setassparms.hdr.length = 8 + len;
- cmd->data.setassparms.hdr.command_code = cmd_code;
- cmd->data.setassparms.hdr.return_code = 0;
- cmd->data.setassparms.hdr.seq_no = 0;
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setassparms.hdr.assist_no = ipa_func;
+ cmd->data.setassparms.hdr.length = 8 + len;
+ cmd->data.setassparms.hdr.command_code = cmd_code;
+ cmd->data.setassparms.hdr.return_code = 0;
+ cmd->data.setassparms.hdr.seq_no = 0;
+ }
return iob;
}
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "simassp6");
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
0, QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, 0, 0,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
length = sizeof(__u32);
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
length, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, length, data,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
QETH_DBF_TEXT(SETUP, 2, "diagtrac");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 16;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
IPA_CMD_ASS_ARP_QUERY_INFO,
sizeof(struct qeth_arp_query_data) - sizeof(char),
prot);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_ADD_ENTRY,
sizeof(struct qeth_arp_cache_entry),
QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
sizeof(struct qeth_arp_cache_entry),
(unsigned long) entry,
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_REMOVE_ENTRY,
12,
QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
12, (unsigned long)buf,
qeth_l3_default_setassparms_cb, NULL);
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
+ int rc;
+
if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
return -ENODEV;
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
- qeth_l3_iqd_read_initial_mac(card);
+ rc = qeth_l3_iqd_read_initial_mac(card);
+ if (rc)
+ return rc;
if (card->options.hsuid[0])
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
if (!card->options.sniffer) {
rc = qeth_l3_start_ipassists(card);
if (rc) {
@@ -3410,10 +3438,10 @@ contin:
}
rc = qeth_l3_setrouting_v4(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
rc = qeth_l3_setrouting_v6(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
}
netif_tx_disable(card->dev);
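
Note: the qeth hunks above (both the l2 and l3 disciplines) repeat one defensive pattern: every qeth_get_ipacmd_buffer() result is now checked before the IPA command header is built. A minimal sketch of that pattern, with ipacmd and prot standing in for the per-call arguments:

	iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
	if (!iob)
		return -ENOMEM;	/* command buffer allocation failed */
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
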
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index df4e27cd996a..9219953ee949 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->u.scratch = 0;
ipr_cmd->sibling = NULL;
+ ipr_cmd->eh_comp = NULL;
ipr_cmd->fast_done = fast_done;
init_timer(&ipr_cmd->timer);
}
@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
@@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
return rc;
}
+/**
+ * ipr_match_lun - Match function for specified LUN
+ * @ipr_cmd: ipr command struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if command matches sdev / 0 if command does not match sdev
+ **/
+static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
+{
+ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
+ return 1;
+ return 0;
+}
+
+/**
+ * ipr_wait_for_ops - Wait for matching commands to complete
+ * @ioa_cfg:	ioa config struct
+ * @device: device to match (sdev)
+ * @match: match function to use
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
+ int (*match)(struct ipr_cmnd *, void *))
+{
+ struct ipr_cmnd *ipr_cmd;
+ int wait;
+ unsigned long flags;
+ struct ipr_hrr_queue *hrrq;
+ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
+ DECLARE_COMPLETION_ONSTACK(comp);
+
+ ENTER;
+ do {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = &comp;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait) {
+ timeout = wait_for_completion_timeout(&comp, timeout);
+
+ if (!timeout) {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = NULL;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait)
+ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
+ LEAVE;
+ return wait ? FAILED : SUCCESS;
+ }
+ }
+ } while (wait);
+
+ LEAVE;
+ return SUCCESS;
+}
+
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
@@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
+
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
spin_lock_irq(cmd->device->host->host_lock);
rc = __ipr_eh_dev_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+
return rc;
}
@@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
unsigned long flags;
int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
ENTER;
+ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
rc = ipr_cancel_op(scsi_cmd);
spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
LEAVE;
return rc;
}
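
Note: taken together, the ipr changes form a completion handshake between the SCSI error handler and the normal completion path, so that eh_dev_reset and eh_abort return only once the affected commands have actually finished. Condensed to the essentials, using only names introduced by this patch:

	DECLARE_COMPLETION_ONSTACK(comp);

	ipr_cmd->eh_comp = &comp;	/* EH thread tags a matching in-flight command */
	/* ... normal completion path, ipr_scsi_eh_done(): ... */
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	/* ... EH thread blocks, bounded by IPR_ABORT_TASK_TIMEOUT ... */
	wait_for_completion_timeout(&comp, IPR_ABORT_TASK_TIMEOUT);
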
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b4f3eec51bc9..ec03b42fa2b9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1606,6 +1606,7 @@ struct ipr_cmnd {
struct scsi_device *sdev;
} u;
+ struct completion *eh_comp;
struct ipr_hrr_queue *hrrq;
struct ipr_ioa_cfg *ioa_cfg;
};
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index e02885451425..9b3829931f40 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev)
return -ENXIO;
if (!get_device(&sdev->sdev_gendev))
return -ENXIO;
- /* We can fail this if we're doing SCSI operations
+	/* try_module_get() can fail if we're doing SCSI operations
* from module exit (like cache flush) */
- try_module_get(sdev->host->hostt->module);
+ __module_get(sdev->host->hostt->module);
return 0;
}
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
-#ifdef CONFIG_MODULE_UNLOAD
- struct module *module = sdev->host->hostt->module;
-
- /* The module refcount will be zero if scsi_device_get()
- * was called from a module removal routine */
- if (module && module_refcount(module) != 0)
- module_put(module);
-#endif
+ module_put(sdev->host->hostt->module);
put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
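
Note: with __module_get() in scsi_device_get() and an unconditional module_put() in scsi_device_put(), the two calls are symmetric again and no longer peek at the module refcount. A hypothetical caller, shown only to illustrate the pairing:

	int example_send_flush(struct scsi_device *sdev)
	{
		int ret = scsi_device_get(sdev);	/* takes device + module references */

		if (ret)
			return ret;
		/* ... issue commands against sdev ... */
		scsi_device_put(sdev);			/* drops both references */
		return 0;
	}
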
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b8b51bc29b4..4aca1b0378c2 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1623,7 +1623,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
req_opcode = cmd[3];
req_sa = get_unaligned_be16(cmd + 4);
alloc_len = get_unaligned_be32(cmd + 6);
- if (alloc_len < 4 && alloc_len > 0xffff) {
+ if (alloc_len < 4 || alloc_len > 0xffff) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
return check_condition_result;
}
@@ -1631,7 +1631,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
a_len = 8192;
else
a_len = alloc_len;
- arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
+ arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
if (NULL == arr) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
INSUFF_RES_ASCQ);
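
Note: the operator fix above turns a dead range check into a live one: no value is simultaneously below 4 and above 0xffff, so the old "&&" condition could never fire. Restated as a hypothetical predicate:

	/* alloc_len is valid iff it lies in [4, 0xffff]; "||" rejects either end */
	static bool alloc_len_valid(u32 alloc_len)
	{
		return alloc_len >= 4 && alloc_len <= 0xffff;
	}
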
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6d5c0b8cb0bb..17bb541f7cc2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1143,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd)
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
int ivecs, count;
- BUG_ON(prot_sdb == NULL);
+ if (prot_sdb == NULL) {
+ /*
+ * This can happen if someone (e.g. multipath)
+ * queues a command to a device on an adapter
+ * that does not support DIX.
+ */
+ WARN_ON_ONCE(1);
+ error = BLKPREP_KILL;
+ goto err_exit;
+ }
+
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 7281316a5ecb..a67d37c7e3c0 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
iounmap(clk_reg);
dws->num_cs = 16;
- dws->fifo_len = 40; /* FIFO has 40 words buffer */
#ifdef CONFIG_SPI_DW_MID_DMA
dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index d0d5542efc06..8edcd1b84562 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws)
if (!dws->fifo_len) {
u32 fifo;
- for (fifo = 2; fifo <= 257; fifo++) {
+ for (fifo = 2; fifo <= 256; fifo++) {
dw_writew(dws, DW_SPI_TXFLTR, fifo);
if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
break;
}
- dws->fifo_len = (fifo == 257) ? 0 : fifo;
+ dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
dw_writew(dws, DW_SPI_TXFLTR, 0);
}
}
@@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dws);
if (ret) {
- dev_warn(&master->dev, "DMA init failed\n");
+ dev_warn(dev, "DMA init failed\n");
dws->dma_inited = 0;
}
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 05c623cfb078..23822e7df6c1 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data)
cs_deassert(drv_data);
}
- spi_finalize_current_message(drv_data->master);
drv_data->cur_chip = NULL;
+ spi_finalize_current_message(drv_data->master);
}
static void reset_sccr1(struct driver_data *drv_data)
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 96a5fc0878d8..3ab7a21445fc 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -82,7 +82,7 @@ struct sh_msiof_spi_priv {
#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
#define MDR1_FLD_SHIFT 2
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 930f6010203e..65d610abe06e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
return 0;
}
- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
return -EFAULT;
}
diff --git a/drivers/staging/media/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig
index 81784c6f7b88..77d8753f6ba4 100644
--- a/drivers/staging/media/tlg2300/Kconfig
+++ b/drivers/staging/media/tlg2300/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_TLG2300
tristate "Telegent TLG2300 USB video capture support (Deprecated)"
depends on VIDEO_DEV && I2C && SND && DVB_CORE
+ depends on MEDIA_USB_SUPPORT
select VIDEO_TUNER
select VIDEO_TVEEPROM
depends on RC_CORE
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 093535c6217b..120b70d72d79 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle;
static const struct mfd_cell nvec_devices[] = {
{
.name = "nvec-kbd",
- .id = 1,
},
{
.name = "nvec-mouse",
- .id = 1,
},
{
.name = "nvec-power",
- .id = 1,
+ .id = 0,
},
{
.name = "nvec-power",
- .id = 2,
+ .id = 1,
},
{
.name = "nvec-paz00",
- .id = 1,
},
};
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
nvec_msg_free(nvec, msg);
}
- ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
+ ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
if (ret)
dev_err(nvec->dev, "error adding subdevices\n");
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index de0c9c9d7091..a6315abe7b7c 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
return 0;
+ /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
+ return 1;
+
/* NOTE: can't use usb_match_id() since interface caches
* aren't set up yet. this is cut/paste from that code.
*/
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 0ffb4ed0a945..41e510ae8c83 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Protocol and OTG Electrical Test Device */
+ { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+
{ } /* terminating entry must be last */
};
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index ad43c5bc1ef1..02e3e2d4ea56 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
u32 gintsts;
irqreturn_t retval = IRQ_NONE;
+ spin_lock(&hsotg->lock);
+
if (!dwc2_is_controller_alive(hsotg)) {
dev_warn(hsotg->dev, "Controller is dead\n");
goto out;
}
- spin_lock(&hsotg->lock);
-
gintsts = dwc2_read_common_intr(hsotg);
if (gintsts & ~GINTSTS_PRTINT)
retval = IRQ_HANDLED;
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
}
}
- spin_unlock(&hsotg->lock);
out:
+ spin_unlock(&hsotg->lock);
return retval;
}
EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
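
Note: the dwc2 change takes the lock before the controller-alive check, so every path through the handler, including the early exit, releases a lock it actually holds. The resulting shape, sketched:

	spin_lock(&hsotg->lock);
	if (!dwc2_is_controller_alive(hsotg))
		goto out;			/* reaches "out" with the lock held */
	/* ... read and dispatch interrupt status ... */
out:
	spin_unlock(&hsotg->lock);		/* single, always-balanced unlock */
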
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index ccfdfb24b240..2f9735b35338 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list,
return phy;
}
- return ERR_PTR(-EPROBE_DEFER);
+ return ERR_PTR(-ENODEV);
}
static struct usb_phy *__usb_find_phy_dev(struct device *dev,
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 11c7a9676441..d684b4b8108f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100,
UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999,
"SCM Microsystems",
"eUSB SCSI Adapter (Bus Powered)",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+ USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
+UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
* and Mac USB Dock USB-SCSI */
UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 6df4357d9ee3..dbc00e56c7f5 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -140,3 +140,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
"External HDD",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+
+/* Reported-by: Richard Henderson <rth@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+ "SimpleTech",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 5927c0a98a74..bcfd2a22208f 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = {
.shutdown = cdns_wdt_shutdown,
.driver = {
.name = "cdns-wdt",
- .owner = THIS_MODULE,
.of_match_table = cdns_wdt_of_match,
.pm = &cdns_wdt_pm_ops,
},
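
Note: dropping ".owner = THIS_MODULE" here (and from meson_wdt below) is safe because the platform core fills it in: platform_driver_register() is a macro that passes THIS_MODULE to __platform_driver_register(). From include/linux/platform_device.h:

	#define platform_driver_register(drv) \
		__platform_driver_register(drv, THIS_MODULE)
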
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index d6add516a7a7..5142bbabe027 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -52,6 +52,8 @@
#define IMX2_WDT_WRSR 0x04 /* Reset Status Register */
#define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */
+#define IMX2_WDT_WMCR 0x08 /* Misc Register */
+
#define IMX2_WDT_MAX_TIME 128
#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
@@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
imx2_wdt_ping_if_active(wdog);
+ /*
+ * Disable the watchdog power down counter at boot. Otherwise the power
+ * down counter will pull down the #WDOG interrupt line for one clock
+ * cycle.
+ */
+ regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0);
+
ret = watchdog_register_device(wdog);
if (ret) {
dev_err(&pdev->dev, "cannot register watchdog device\n");
@@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-/* Disable watchdog if it is active during suspend */
+/* Disable the watchdog if it is active, or inactive but still running */
static int imx2_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
- imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
- imx2_wdt_ping(wdog);
+ /* The watchdog IP block is running */
+ if (imx2_wdt_is_running(wdev)) {
+ imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+ imx2_wdt_ping(wdog);
- /* Watchdog has been stopped but IP block is still running */
- if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev))
- del_timer_sync(&wdev->timer);
+		/* If the watchdog is not active, stop the ping timer */
+ if (!watchdog_active(wdog))
+ del_timer_sync(&wdev->timer);
+ }
clk_disable_unprepare(wdev->clk);
@@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev)
clk_prepare_enable(wdev->clk);
if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
- /* Resumes from deep sleep we need restart
- * the watchdog again.
+		/*
+		 * If the watchdog is still active and we resume
+		 * from a deep sleep state, the watchdog must be
+		 * restarted.
+		 */
imx2_wdt_setup(wdog);
imx2_wdt_set_timeout(wdog, wdog->timeout);
imx2_wdt_ping(wdog);
} else if (imx2_wdt_is_running(wdev)) {
+ /* Resuming from non-deep sleep state. */
+ imx2_wdt_set_timeout(wdog, wdog->timeout);
imx2_wdt_ping(wdog);
- mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
+		/*
+		 * If the watchdog is not active, restart the
+		 * software ping timer.
+		 */
+ if (!watchdog_active(wdog))
+ mod_timer(&wdev->timer,
+ jiffies + wdog->timeout * HZ / 2);
}
return 0;
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index ef6a298e8c45..1f4155ee3404 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = {
.remove = meson_wdt_remove,
.shutdown = meson_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = meson_wdt_dt_ids,
},