Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 6
-rw-r--r--  drivers/acpi/bus.c | 3
-rw-r--r--  drivers/acpi/cppc_acpi.c | 9
-rw-r--r--  drivers/android/binder.c | 3
-rw-r--r--  drivers/android/binder_alloc.c | 18
-rw-r--r--  drivers/ata/libata-zpodd.c | 34
-rw-r--r--  drivers/block/zram/zram_drv.c | 32
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 10
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 2
-rw-r--r--  drivers/dma/stm32-mdma.c | 4
-rw-r--r--  drivers/gpio/gpio-adnp.c | 6
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 2
-rw-r--r--  drivers/gpio/gpio-exar.c | 2
-rw-r--r--  drivers/gpio/gpio-mockup.c | 10
-rw-r--r--  drivers/gpio/gpiolib-of.c | 17
-rw-r--r--  drivers/gpio/gpiolib.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 20
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 5
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_file.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 9
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 18
-rw-r--r--  drivers/gpu/drm/tegra/hub.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/vic.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 72
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 6
-rw-r--r--  drivers/gpu/drm/vkms/vkms_gem.c | 5
-rw-r--r--  drivers/hid/Kconfig | 1
-rw-r--r--  drivers/hid/hid-core.c | 6
-rw-r--r--  drivers/hid/hid-debug.c | 5
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-input.c | 1
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 13
-rw-r--r--  drivers/hid/hid-quirks.c | 11
-rw-r--r--  drivers/hid/hid-steam.c | 26
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 4
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 1
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 2
-rw-r--r--  drivers/hwmon/occ/common.c | 6
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r--  drivers/iommu/amd_iommu.c | 9
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 7
-rw-r--r--  drivers/iommu/amd_iommu_types.h | 2
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 19
-rw-r--r--  drivers/iommu/iommu.c | 8
-rw-r--r--  drivers/leds/leds-pca9532.c | 8
-rw-r--r--  drivers/leds/trigger/ledtrig-netdev.c | 16
-rw-r--r--  drivers/mfd/Kconfig | 2
-rw-r--r--  drivers/mfd/sprd-sc27xx-spi.c | 42
-rw-r--r--  drivers/mfd/twl-core.c | 23
-rw-r--r--  drivers/misc/habanalabs/command_submission.c | 6
-rw-r--r--  drivers/misc/habanalabs/debugfs.c | 7
-rw-r--r--  drivers/misc/habanalabs/device.c | 71
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 65
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h | 21
-rw-r--r--  drivers/misc/habanalabs/hw_queue.c | 5
-rw-r--r--  drivers/misc/habanalabs/memory.c | 38
-rw-r--r--  drivers/misc/habanalabs/mmu.c | 6
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 24
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 20
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 53
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 18
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 57
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 34
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 6
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 32
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vrf.c | 1
-rw-r--r--  drivers/nvme/host/multipath.c | 5
-rw-r--r--  drivers/nvme/host/tcp.c | 2
-rw-r--r--  drivers/nvme/target/core.c | 4
-rw-r--r--  drivers/nvme/target/io-cmd-file.c | 20
-rw-r--r--  drivers/pci/pci.h | 1
-rw-r--r--  drivers/pci/pcie/bw_notification.c | 23
-rw-r--r--  drivers/pci/probe.c | 2
-rw-r--r--  drivers/phy/allwinner/phy-sun4i-usb.c | 5
-rw-r--r--  drivers/s390/cio/chsc.c | 13
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c | 8
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 19
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 2
-rw-r--r--  drivers/s390/crypto/ap_queue.c | 26
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 30
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 17
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 21
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 4
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 7
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 39
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 7
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 6
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 12
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 6
-rw-r--r--  drivers/scsi/sd.c | 22
-rw-r--r--  drivers/soc/bcm/bcm2835-power.c | 49
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/axis-fifo/Kconfig | 1
-rw-r--r--  drivers/staging/comedi/comedidev.h | 2
-rw-r--r--  drivers/staging/comedi/drivers.c | 33
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_common.c | 10
-rw-r--r--  drivers/staging/erofs/dir.c | 45
-rw-r--r--  drivers/staging/erofs/unzip_vle.c | 45
-rw-r--r--  drivers/staging/erofs/unzip_vle_lz4.c | 7
-rw-r--r--  drivers/staging/mt7621-dts/gbpc1.dts | 29
-rw-r--r--  drivers/staging/mt7621-dts/mt7621.dtsi | 73
-rw-r--r--  drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt | 48
-rw-r--r--  drivers/staging/mt7621-eth/Kconfig | 39
-rw-r--r--  drivers/staging/mt7621-eth/Makefile | 14
-rw-r--r--  drivers/staging/mt7621-eth/TODO | 13
-rw-r--r--  drivers/staging/mt7621-eth/ethtool.c | 250
-rw-r--r--  drivers/staging/mt7621-eth/ethtool.h | 15
-rw-r--r--  drivers/staging/mt7621-eth/gsw_mt7620.h | 277
-rw-r--r--  drivers/staging/mt7621-eth/gsw_mt7621.c | 297
-rw-r--r--  drivers/staging/mt7621-eth/mdio.c | 275
-rw-r--r--  drivers/staging/mt7621-eth/mdio.h | 27
-rw-r--r--  drivers/staging/mt7621-eth/mdio_mt7620.c | 173
-rw-r--r--  drivers/staging/mt7621-eth/mtk_eth_soc.c | 2178
-rw-r--r--  drivers/staging/mt7621-eth/mtk_eth_soc.h | 716
-rw-r--r--  drivers/staging/mt7621-eth/soc_mt7621.c | 161
-rw-r--r--  drivers/staging/mt7621-pci/Kconfig | 1
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c | 2
-rw-r--r--  drivers/staging/octeon/ethernet.c | 40
-rw-r--r--  drivers/staging/octeon/octeon-ethernet.h | 4
-rw-r--r--  drivers/staging/olpc_dcon/olpc_dcon_xo_1.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_xmit.c | 9
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_xmit.h | 2
-rw-r--r--  drivers/staging/rtl8712/rtl8712_cmd.c | 10
-rw-r--r--  drivers/staging/rtl8712/rtl8712_cmd.h | 2
-rw-r--r--  drivers/staging/rtl8723bs/core/rtw_xmit.c | 14
-rw-r--r--  drivers/staging/rtl8723bs/include/rtw_xmit.h | 2
-rw-r--r--  drivers/staging/rtlwifi/phydm/rtl_phydm.c | 2
-rw-r--r--  drivers/staging/rtlwifi/rtl8822be/fw.c | 2
-rw-r--r--  drivers/staging/speakup/speakup_soft.c | 16
-rw-r--r--  drivers/staging/speakup/spk_priv.h | 1
-rw-r--r--  drivers/staging/speakup/synth.c | 6
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 8
-rw-r--r--  drivers/staging/vt6655/device_main.c | 11
-rw-r--r--  drivers/tty/serial/ar933x_uart.c | 24
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 52
-rw-r--r--  drivers/tty/serial/kgdboc.c | 4
-rw-r--r--  drivers/tty/serial/max310x.c | 2
-rw-r--r--  drivers/tty/serial/mvebu-uart.c | 3
-rw-r--r--  drivers/tty/serial/mxs-auart.c | 4
-rw-r--r--  drivers/tty/serial/qcom_geni_serial.c | 2
-rw-r--r--  drivers/tty/serial/sc16is7xx.c | 12
-rw-r--r--  drivers/tty/serial/sh-sci.c | 12
-rw-r--r--  drivers/tty/tty_port.c | 10
-rw-r--r--  drivers/usb/class/cdc-acm.c | 4
-rw-r--r--  drivers/usb/common/common.c | 2
-rw-r--r--  drivers/usb/core/hcd.c | 3
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 6
-rw-r--r--  drivers/usb/gadget/udc/net2272.c | 1
-rw-r--r--  drivers/usb/gadget/udc/net2280.c | 8
-rw-r--r--  drivers/usb/host/u132-hcd.c | 3
-rw-r--r--  drivers/usb/host/xhci-dbgcap.c | 5
-rw-r--r--  drivers/usb/host/xhci-hub.c | 19
-rw-r--r--  drivers/usb/host/xhci-rcar.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 9
-rw-r--r--  drivers/usb/host/xhci.h | 8
-rw-r--r--  drivers/usb/misc/usb251xb.c | 4
-rw-r--r--  drivers/usb/mtu3/Kconfig | 1
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 4
-rw-r--r--  drivers/usb/serial/mos7720.c | 4
-rw-r--r--  drivers/usb/serial/option.c | 17
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 27
-rw-r--r--  drivers/usb/typec/tcpm/wcove.c | 9
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.c | 106
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.h | 15
-rw-r--r--  drivers/virt/vboxguest/vboxguest_linux.c | 26
-rw-r--r--  drivers/virt/vboxguest/vboxguest_utils.c | 32
-rw-r--r--  drivers/virt/vboxguest/vboxguest_version.h | 9
-rw-r--r--  drivers/virt/vboxguest/vmmdev.h | 8
245 files changed, 1796 insertions(+), 5510 deletions(-)
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 62d3aa74277b..5e9d7348c16f 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_enable_gpe);
- /* Enable the requested GPE */
+ /* Clear the GPE status */
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+ /* Enable the requested GPE */
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 6ecbbabf1233..eec263c9019e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
- /* Initialize debug output. Linux does not use ACPICA defaults */
- acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
-
#ifdef CONFIG_X86
/*
* If the machine falls into the DMI check table,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1b207fca1420..d4244e7d0e38 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
cpc_read(cpunum, nominal_reg, &nom);
perf_caps->nominal_perf = nom;
- cpc_read(cpunum, guaranteed_reg, &guaranteed);
- perf_caps->guaranteed_perf = guaranteed;
+ if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
+ IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
+ perf_caps->guaranteed_perf = 0;
+ } else {
+ cpc_read(cpunum, guaranteed_reg, &guaranteed);
+ perf_caps->guaranteed_perf = guaranteed;
+ }
cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
perf_caps->lowest_nonlinear_perf = min_nonlinear;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8685882da64c..4b9c7ca492e6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
offset, read_size);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6389467670a0..195f120c4e8c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+ mm = alloc->vma_vm_mm;
+ if (!mmget_not_zero(mm))
+ goto err_mmget;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
- if (vma) {
- if (!mmget_not_zero(alloc->vma_vm_mm))
- goto err_mmget;
- mm = alloc->vma_vm_mm;
- if (!down_read_trylock(&mm->mmap_sem))
- goto err_down_write_mmap_sem_failed;
- }
list_lru_isolate(lru, item);
spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
zap_page_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
-
- up_read(&mm->mmap_sem);
- mmput(mm);
}
+ up_write(&mm->mmap_sem);
+ mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index b3ed8f9953a8..173e6f2dd9af 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
/* Per the spec, only slot type and drawer type ODD can be supported */
static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
{
- char buf[16];
+ char *buf;
unsigned int ret;
- struct rm_feature_desc *desc = (void *)(buf + 8);
+ struct rm_feature_desc *desc;
struct ata_taskfile tf;
static const char cdb[] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
- 0, sizeof(buf),
+ 0, 16,
0, 0, 0,
};
+ buf = kzalloc(16, GFP_KERNEL);
+ if (!buf)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+ desc = (void *)(buf + 8);
+
ata_tf_init(dev, &tf);
tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
tf.protocol = ATAPI_PROT_PIO;
- tf.lbam = sizeof(buf);
+ tf.lbam = 16;
ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
- buf, sizeof(buf), 0);
- if (ret)
+ buf, 16, 0);
+ if (ret) {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
- if (be16_to_cpu(desc->feature_code) != 3)
+ if (be16_to_cpu(desc->feature_code) != 3) {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
- if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+ if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+ kfree(buf);
return ODD_MECH_TYPE_SLOT;
- else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+ } else if (desc->mech_type == 1 && desc->load == 0 &&
+ desc->eject == 1) {
+ kfree(buf);
return ODD_MECH_TYPE_DRAWER;
- else
+ } else {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
}
/* Test if ODD is zero power ready by sense code */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e7a5f1d1c314..399cad7daae7 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
int index;
- char mode_buf[8];
- ssize_t sz;
- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
- if (sz <= 0)
- return -EINVAL;
-
- /* ignore trailing new line */
- if (mode_buf[sz - 1] == '\n')
- mode_buf[sz - 1] = 0x00;
-
- if (strcmp(mode_buf, "all"))
+ if (!sysfs_streq(buf, "all"))
return -EINVAL;
down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
struct bio bio;
struct bio_vec bio_vec;
struct page *page;
- ssize_t ret, sz;
- char mode_buf[8];
- int mode = -1;
+ ssize_t ret;
+ int mode;
unsigned long blk_idx = 0;
- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
- if (sz <= 0)
- return -EINVAL;
-
- /* ignore trailing newline */
- if (mode_buf[sz - 1] == '\n')
- mode_buf[sz - 1] = 0x00;
-
- if (!strcmp(mode_buf, "idle"))
+ if (sysfs_streq(buf, "idle"))
mode = IDLE_WRITEBACK;
- else if (!strcmp(mode_buf, "huge"))
+ else if (sysfs_streq(buf, "huge"))
mode = HUGE_WRITEBACK;
-
- if (mode == -1)
+ else
return -EINVAL;
down_read(&zram->init_lock);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72866a004f07..466ebd84ad17 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config XILINX_HWICAP
config R3964
tristate "Siemens R3964 line discipline"
- depends on TTY
+ depends on TTY && BROKEN
---help---
This driver allows synchronous communication with devices using the
Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e22f0dbaebb1..2986119dd31f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
if (ret)
return ret;
- return cppc_perf.guaranteed_perf;
+ if (cppc_perf.guaranteed_perf)
+ return cppc_perf.guaranteed_perf;
+
+ return cppc_perf.nominal_perf;
}
#else /* CONFIG_ACPI_CPPC_LIB */
@@ -2593,6 +2596,9 @@ static int __init intel_pstate_init(void)
const struct x86_cpu_id *id;
int rc;
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
if (no_load)
return -ENODEV;
@@ -2608,7 +2614,7 @@ static int __init intel_pstate_init(void)
} else {
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id) {
- pr_info("CPU ID not supported\n");
+ pr_info("CPU model not supported\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 3f49427766b8..2b51e0718c9f 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
clk_put(priv->clk);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
- kfree(priv);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+ kfree(priv);
return 0;
}
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 4e0eede599a8..ac0301b69593 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
dmadev->nr_channels = nr_channels;
dmadev->nr_requests = nr_requests;
- ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+ device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
dmadev->ahb_addr_masks,
count);
- if (ret)
- return ret;
dmadev->nr_ahb_addr_masks = count;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 91b90c0cea73..12acdac85820 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
if (err < 0)
goto out;
- if (err & BIT(pos))
- err = -EACCES;
+ if (value & BIT(pos)) {
+ err = -EPERM;
+ goto out;
+ }
err = 0;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 854bce4fb9e7..217507002dbc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->offset_timer =
devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+ if (!gpio->offset_timer)
+ return -ENOMEM;
return aspeed_gpio_setup_irqs(gpio, pdev);
}
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 0ecd2369c2ca..a09d2f9ebacc 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
mutex_init(&exar_gpio->lock);
index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+ if (index < 0)
+ goto err_destroy;
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 154d959e8993..b6a4efce7c92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
struct gpio_mockup_chip *chip;
struct seq_file *sfile;
struct gpio_chip *gc;
+ int val, cnt;
char buf[3];
- int val, rv;
if (*ppos != 0)
return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
gc = &chip->gc;
val = gpio_mockup_get(gc, priv->offset);
- snprintf(buf, sizeof(buf), "%d\n", val);
+ cnt = snprintf(buf, sizeof(buf), "%d\n", val);
- rv = copy_to_user(usr_buf, buf, sizeof(buf));
- if (rv)
- return rv;
-
- return sizeof(buf) - 1;
+ return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
}
static ssize_t gpio_mockup_debugfs_write(struct file *file,
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8b9c3ab70f6e..6a3ec575a404 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* to determine if the flags should have inverted semantics.
*/
if (IS_ENABLED(CONFIG_SPI_MASTER) &&
- of_property_read_bool(np, "cs-gpios")) {
+ of_property_read_bool(np, "cs-gpios") &&
+ !strcmp(propname, "cs-gpios")) {
struct device_node *child;
u32 cs;
int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
* conflict and the "spi-cs-high" flag will
* take precedence.
*/
- if (of_property_read_bool(np, "spi-cs-high")) {
+ if (of_property_read_bool(child, "spi-cs-high")) {
if (*flags & OF_GPIO_ACTIVE_LOW) {
pr_warn("%s GPIO handle specifies active low - ignored\n",
- of_node_full_name(np));
+ of_node_full_name(child));
*flags &= ~OF_GPIO_ACTIVE_LOW;
}
} else {
if (!(*flags & OF_GPIO_ACTIVE_LOW))
pr_info("%s enforce active low on chipselect handle\n",
- of_node_full_name(np));
+ of_node_full_name(child));
*flags |= OF_GPIO_ACTIVE_LOW;
}
break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_node_get(chip->of_node);
- return of_gpiochip_scan_gpios(chip);
+ status = of_gpiochip_scan_gpios(chip);
+ if (status) {
+ of_node_put(chip->of_node);
+ gpiochip_remove_pin_ranges(chip);
+ }
+
+ return status;
}
void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 144af0733581..0495bf1d480a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2776,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
}
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+ return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -2813,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = gpio_set_config(chip, gpio, packed);
+ rc = chip->set_config(chip, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4f8fb4ecde34..ac0d646a7b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3625,6 +3625,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
struct pci_dev *pdev = adev->pdev;
enum pci_bus_speed cur_speed;
enum pcie_link_width cur_width;
+ u32 ret = 1;
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3633,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
while (pdev) {
cur_speed = pcie_get_speed_cap(pdev);
cur_width = pcie_get_width_cap(pdev);
+ ret = pcie_bandwidth_available(adev->pdev, NULL,
+ NULL, &cur_width);
+ if (!ret)
+ cur_width = PCIE_LNK_WIDTH_RESRV;
if (cur_speed != PCI_SPEED_UNKNOWN) {
if (*speed == PCI_SPEED_UNKNOWN)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d0309e8c9d12..a11db2b1a63f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- adev->gfx.rlc.funcs->reset(adev);
-
gfx_v9_0_init_pg(adev);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index fb27783d7a54..81127f7d6ed1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5429,9 +5429,11 @@ static void get_freesync_config_for_crtc(
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(new_con_state->base.connector);
struct drm_display_mode *mode = &new_crtc_state->base.mode;
+ int vrefresh = drm_mode_vrefresh(mode);
new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
- aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+ vrefresh >= aconnector->min_vfreq &&
+ vrefresh <= aconnector->max_vfreq;
if (new_crtc_state->vrr_supported) {
new_crtc_state->stream->ignore_msa_timing_param = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4eba3c4800b6..ea18e9c2d8ce 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2660,12 +2660,18 @@ void core_link_enable_stream(
void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ dal_ddc_service_write_scdc_data(
+ stream->link->ddc, 0,
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 9aa7bec1b5fe..23b5b94a4939 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
* MP0CLK DS
*/
data->registry_data.disallowed_features = 0xE0041C00;
+ /* ECC feature should be disabled on old SMUs */
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ hwmgr->smu_version = smum_get_argument(hwmgr);
+ if (hwmgr->smu_version < 0x282100)
+ data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
data->registry_data.od_state_in_dc_support = 0;
data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+ data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
for (i = 0; i < GNLD_FEATURES_MAX; i++) {
data->smu_features[i].smu_feature_bitmap =
@@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
"FCLK_DS",
"MP1CLK_DS",
"MP0CLK_DS",
- "XGMI"};
+ "XGMI",
+ "ECC"};
static const char *output_title[] = {
"FEATURES",
"BITMASK",
@@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
struct vega20_single_dpm_table *dpm_table;
bool vblank_too_short = false;
bool disable_mclk_switching;
+ bool disable_fclk_switching;
uint32_t i, latency;
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
if (hwmgr->display_config->nb_pstate_switch_disable)
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ if ((disable_mclk_switching &&
+ (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+ hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+ disable_fclk_switching = true;
+ else
+ disable_fclk_switching = false;
+
/* fclk */
dpm_table = &(data->dpm_table.fclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
- if (hwmgr->display_config->nb_pstate_switch_disable)
+ if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
/* vclk */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index a5bc758ae097..ac2a3118a0ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -80,6 +80,7 @@ enum {
GNLD_DS_MP1CLK,
GNLD_DS_MP0CLK,
GNLD_XGMI,
+ GNLD_ECC,
GNLD_FEATURES_MAX
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 63d5cf691549..195c4ae67058 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -99,7 +99,7 @@
#define FEATURE_DS_MP1CLK_BIT 30
#define FEATURE_DS_MP0CLK_BIT 31
#define FEATURE_XGMI_BIT 32
-#define FEATURE_SPARE_33_BIT 33
+#define FEATURE_ECC_BIT 33
#define FEATURE_SPARE_34_BIT 34
#define FEATURE_SPARE_35_BIT 35
#define FEATURE_SPARE_36_BIT 36
@@ -165,7 +165,8 @@
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT )
#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT )
-#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT )
+#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT )
+#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT )
#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 381581b01d48..05bbc2b622fc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
-
- mutex_lock(&drm_global_mutex);
- if (dev->open_count == 0)
- drm_dev_put(dev);
- mutex_unlock(&drm_global_mutex);
+ drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_dev_unplug);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0e9349ff2d16..af2ab640cadb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
best_depth = fmt->depth;
}
}
- if (sizes.surface_depth != best_depth) {
+ if (sizes.surface_depth != best_depth && best_depth) {
DRM_INFO("requested bpp %d, scaled depth down to %d",
sizes.surface_bpp, best_depth);
sizes.surface_depth = best_depth;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 83a5bbca6e7e..7caa3c7ed978 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
drm_close_helper(filp);
- if (!--dev->open_count) {
+ if (!--dev->open_count)
drm_lastclose(dev);
- if (drm_dev_is_unplugged(dev))
- drm_put_dev(dev);
- }
+
mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 35b4ec3f7618..3592d04c33b2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
}
if (index_mode) {
- if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+ if (guest_gma >= I915_GTT_PAGE_SIZE) {
ret = -EFAULT;
goto err;
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 035479e273be..e3f9caa7839f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
/**
* intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
* @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
*
* This function is used to trigger hotplug interrupt for vGPU
*
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 3e7e2b80c857..5d887f7cc0d5 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -238,9 +238,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
default:
gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
}
-
- info->size = (((p.stride * p.height * p.bpp) / 8) +
- (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
if (ret)
@@ -262,14 +259,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
info->x_hot = UINT_MAX;
info->y_hot = UINT_MAX;
}
-
- info->size = (((info->stride * c.height * c.bpp) / 8)
- + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
} else {
gvt_vgpu_err("invalid plane id:%d\n", plane_id);
return -EINVAL;
}
+ info->size = (info->stride * info->height + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
if (info->size == 0) {
gvt_vgpu_err("fb size is zero\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c7103dd2d8d5..cf133ef03873 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
}
list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+ mutex_lock(&gvt->gtt.ppgtt_mm_lock);
list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+ mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
return mm;
}
@@ -1942,7 +1946,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
*/
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
- atomic_dec(&mm->pincount);
+ atomic_dec_if_positive(&mm->pincount);
}
/**
@@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
if (ret)
return ret;
+ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
list_move_tail(&mm->ppgtt_mm.lru_list,
&mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
}
return 0;
@@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
struct intel_vgpu_mm *mm;
struct list_head *pos, *n;
+ mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
@@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
continue;
list_del_init(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
invalidate_ppgtt_mm(mm);
return 1;
}
+ mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
return 0;
}
@@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
}
}
INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+ mutex_init(&gvt->gtt.ppgtt_mm_lock);
return 0;
}
@@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
if (mm->type == INTEL_GVT_MM_PPGTT) {
+ mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
list_del_init(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
if (mm->ppgtt_mm.shadowed)
invalidate_ppgtt_mm(mm);
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d8cb04cc946d..edb610dc5d86 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
struct list_head oos_page_use_list_head;
struct list_head oos_page_free_list_head;
+ struct mutex ppgtt_mm_lock;
struct list_head ppgtt_mm_lru_list_head;
struct page *scratch_page;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7d84cfb9051a..7902fb162d09 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
{RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+ {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb8f936fdaa..05b953793316 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
int i = 0;
if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
- return -1;
+ return -EINVAL;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow)
return 0;
- ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
- if (ret < 0) {
- gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
- return ret;
- }
-
/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+ struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
+ ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+ if (ret < 0) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ goto err_req;
+ }
+
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
ret = prepare_workload(workload);
out:
+ if (ret) {
+ /* We might still need to add request with
+ * clean ctx to retire it properly..
+ */
+ rq = fetch_and_zero(&workload->req);
+ i915_request_put(rq);
+ }
+
if (!IS_ERR_OR_NULL(workload->req)) {
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
goto out;
}
- if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+ if (!scheduler->current_vgpu->active ||
+ list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
goto out;
/*
@@ -1474,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_runtime_pm_put_unchecked(dev_priv);
}
- if (ret && (vgpu_is_vm_unhealthy(ret))) {
- enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ if (ret) {
+ if (vgpu_is_vm_unhealthy(ret))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
intel_vgpu_destroy_workload(workload);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bd890c04fe4..f6f6e5b78e97 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
&ctx);
if (ret) {
- ret = -EINTR;
+ if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+ try_again = true;
+ continue;
+ }
break;
}
crtc = connector->state->crtc;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9adc7bb9e69c..a67a63b5aa84 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
- INTEL_DEVID(dev_priv) == 0x87C0)
+ INTEL_DEVID(dev_priv) == 0x87C0 || \
+ INTEL_DEVID(dev_priv) == 0x87CA)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 638a586469f9..047855dd8c6b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
#define GEN11_EU_DISABLE _MMIO(0x9134)
#define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
_TRANS_DDI_FUNC_CTL2_A)
#define PORT_SYNC_MODE_ENABLE (1 << 4)
-#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce7176f63..b9b0ea4e2404 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
struct i915_gem_context *ctx;
ctx = live_context(i915, file);
- if (!ctx)
+ if (IS_ERR(ctx))
break;
/* We will need some GGTT space for the rq's context */
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2281ed3eb774..8a4ebcb6405c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
ret = drm_dev_register(drm, 0);
if (ret)
- goto free_drm;
+ goto uninstall_irq;
drm_fbdev_generic_setup(drm, 32);
return 0;
+uninstall_irq:
+ drm_irq_uninstall(drm);
free_drm:
drm_dev_put(drm);
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
static void meson_drv_unbind(struct device *dev)
{
- struct drm_device *drm = dev_get_drvdata(dev);
- struct meson_drm *priv = drm->dev_private;
+ struct meson_drm *priv = dev_get_drvdata(dev);
+ struct drm_device *drm = priv->drm;
if (priv->canvas) {
meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
}
drm_dev_unregister(drm);
+ drm_irq_uninstall(drm);
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index e28814f4ea6c..563953ec6ad0 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
/* If sink max TMDS clock, we reject the mode */
- if (mode->clock > connector->display_info.max_tmds_clock)
+ if (connector->display_info.max_tmds_clock &&
+ mode->clock > connector->display_info.max_tmds_clock)
return MODE_BAD;
/* Check against non-VIC supported modes */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c7d4c6073ea5..0d4ade9d4722 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
clk_disable(vop->hclk);
}
+static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+{
+ if (win->phy->scl && win->phy->scl->ext) {
+ VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+ VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+ VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+ VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+ }
+
+ VOP_WIN_SET(vop, win, enable, 0);
+}
+
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
struct vop_win *vop_win = &vop->win[i];
const struct vop_win_data *win = vop_win->data;
- VOP_WIN_SET(vop, win, enable, 0);
+ vop_win_disable(vop, win);
}
spin_unlock(&vop->reg_lock);
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
- VOP_WIN_SET(vop, win, enable, 0);
+ vop_win_disable(vop, win);
spin_unlock(&vop->reg_lock);
}
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
int channel = i * 2 + 1;
VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
- VOP_WIN_SET(vop, win, enable, 0);
+ vop_win_disable(vop, win);
VOP_WIN_SET(vop, win, gate, 1);
}
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index ba9b3cfb8c3d..b3436c2aed68 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
+ struct tegra_dc *dc;
u32 value;
/* rien ne va plus */
if (!old_state || !old_state->crtc)
return;
+ dc = to_tegra_dc(old_state->crtc);
+
/*
* XXX Legacy helpers seem to sometimes call ->atomic_disable() even
* on planes that are already disabled. Make sure we fallback to the
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 39bfed9623de..982ce37ecde1 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
if (vic->booted)
return 0;
+#ifdef CONFIG_IOMMU_API
if (vic->config->supports_sid) {
struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
vic_writel(vic, value, VIC_THI_STREAMID1);
}
}
+#endif
/* setup clockgating registers */
vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 66885c24590f..c1bd5e3d9e4a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -18,18 +18,19 @@
#include "udl_connector.h"
#include "udl_drv.h"
-static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
- u8 *buff)
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
{
int ret, i;
u8 *read_buff;
+ struct udl_device *udl = data;
read_buff = kmalloc(2, GFP_KERNEL);
if (!read_buff)
- return false;
+ return -1;
- for (i = 0; i < EDID_LENGTH; i++) {
- int bval = (i + block_idx * EDID_LENGTH) << 8;
+ for (i = 0; i < len; i++) {
+ int bval = (i + block * EDID_LENGTH) << 8;
ret = usb_control_msg(udl->udev,
usb_rcvctrlpipe(udl->udev, 0),
(0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
kfree(read_buff);
- return false;
+ return -1;
}
- buff[i] = read_buff[1];
+ buf[i] = read_buff[1];
}
kfree(read_buff);
- return true;
-}
-
-static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
- int *result_buff_size)
-{
- int i, extensions;
- u8 *block_buff = NULL, *buff_ptr;
-
- block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (block_buff == NULL)
- return false;
-
- if (udl_get_edid_block(udl, 0, block_buff) &&
- memchr_inv(block_buff, 0, EDID_LENGTH)) {
- extensions = ((struct edid *)block_buff)->extensions;
- if (extensions > 0) {
- /* we have to read all extensions one by one */
- *result_buff_size = EDID_LENGTH * (extensions + 1);
- *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
- buff_ptr = *result_buff;
- if (buff_ptr == NULL) {
- kfree(block_buff);
- return false;
- }
- memcpy(buff_ptr, block_buff, EDID_LENGTH);
- kfree(block_buff);
- buff_ptr += EDID_LENGTH;
- for (i = 1; i < extensions; ++i) {
- if (udl_get_edid_block(udl, i, buff_ptr)) {
- buff_ptr += EDID_LENGTH;
- } else {
- kfree(*result_buff);
- *result_buff = NULL;
- return false;
- }
- }
- return true;
- }
- /* we have only base edid block */
- *result_buff = block_buff;
- *result_buff_size = EDID_LENGTH;
- return true;
- }
-
- kfree(block_buff);
-
- return false;
+ return 0;
}
static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- u8 *edid_buff = NULL;
- int edid_buff_size = 0;
struct udl_device *udl = connector->dev->dev_private;
struct udl_drm_connector *udl_connector =
container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
udl_connector->edid = NULL;
}
-
- if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
+ udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+ if (!udl_connector->edid)
return connector_status_disconnected;
- udl_connector->edid = (struct edid *)edid_buff;
-
return connector_status_connected;
}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5930facd6d2d..11a8f99ba18c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
ret = drm_gem_handle_create(file, &obj->base, handle);
drm_gem_object_put_unlocked(&obj->base);
if (ret)
- goto err;
+ return ERR_PTR(ret);
return &obj->base;
-
-err:
- __vgem_gem_destroy(obj);
- return ERR_PTR(ret);
}
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 138b0bb325cf..69048e73377d 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
ret = drm_gem_handle_create(file, &obj->gem, handle);
drm_gem_object_put_unlocked(&obj->gem);
- if (ret) {
- drm_gem_object_release(&obj->gem);
- kfree(obj);
+ if (ret)
return ERR_PTR(ret);
- }
return &obj->gem;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6ca8d322b487..4ca0cdfa6b33 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -150,6 +150,7 @@ config HID_ASUS
tristate "Asus"
depends on LEDS_CLASS
depends on ASUS_WMI || ASUS_WMI=n
+ select POWER_SUPPLY
---help---
Support for Asus notebook built-in keyboard and touchpad via i2c, and
the Asus Republic of Gamers laptop keyboard special keys.
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9993b692598f..860e21ec6a49 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
unsigned offset, unsigned n)
{
- if (n > 32) {
- hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
+ if (n > 256) {
+ hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
n, current->comm);
- n = 32;
+ n = 256;
}
return __extract(report, offset, n);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ac9fda1b5a72..1384e57182af 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
seq_printf(f, "\n\n");
/* dump parsed data and input mappings */
+ if (down_interruptible(&hdev->driver_input_lock))
+ return 0;
+
hid_dump_device(hdev, f);
seq_printf(f, "\n");
hid_dump_input_mapping(hdev, f);
+ up(&hdev->driver_input_lock);
+
return 0;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6d93f4ad037..adce58f24f76 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1083,6 +1083,7 @@
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+#define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e
#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b10b1922c5bd..1fce0076e7dc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -998,6 +998,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x1b8: map_key_clear(KEY_VIDEO); break;
case 0x1bc: map_key_clear(KEY_MESSENGER); break;
case 0x1bd: map_key_clear(KEY_INFO); break;
+ case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
case 0x201: map_key_clear(KEY_NEW); break;
case 0x202: map_key_clear(KEY_OPEN); break;
case 0x203: map_key_clear(KEY_CLOSE); break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 15ed6177a7a3..199cc256e9d9 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
kfree(data);
return -ENOMEM;
}
+ data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+ if (!data->wq) {
+ kfree(data->effect_ids);
+ kfree(data);
+ return -ENOMEM;
+ }
+
data->hidpp = hidpp;
data->feature_index = feature_index;
data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
/* ignore boost value at response.fap.params[2] */
/* init the hardware command queue */
- data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
atomic_set(&data->workqueue_size, 0);
/* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
input_report_rel(mydata->input, REL_Y, v);
v = hid_snto32(data[6], 8);
- hidpp_scroll_counter_handle_scroll(
- &hidpp->vertical_wheel_counter, v);
+ if (v != 0)
+ hidpp_scroll_counter_handle_scroll(
+ &hidpp->vertical_wheel_counter, v);
input_sync(mydata->input);
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 953908f2267c..77ffba48cc73 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ }
};
-/**
+/*
* hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
*
* There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
if (hdev->product == 0x0401 &&
strncmp(hdev->name, "ELAN0800", 8) != 0)
return true;
+ /* Same with product id 0x0400 */
+ if (hdev->product == 0x0400 &&
+ strncmp(hdev->name, "QTEC0001", 8) != 0)
+ return true;
break;
}
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
}
if (bl_entry != NULL)
- dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
bl_entry->driver_data, bl_entry->vendor,
bl_entry->product);
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
quirks |= bl_entry->driver_data;
if (quirks)
- dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
quirks, hdev->vendor, hdev->product);
return quirks;
}
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8141cadfca0e..8dae0f9b819e 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
static int steam_register(struct steam_device *steam)
{
int ret;
+ bool client_opened;
/*
* This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
* Unlikely, but getting the serial could fail, and it is not so
* important, so make up a serial number and go on.
*/
+ mutex_lock(&steam->mutex);
if (steam_get_serial(steam) < 0)
strlcpy(steam->serial_no, "XXXXXXXXXX",
sizeof(steam->serial_no));
+ mutex_unlock(&steam->mutex);
hid_info(steam->hdev, "Steam Controller '%s' connected",
steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
}
mutex_lock(&steam->mutex);
- if (!steam->client_opened) {
+ client_opened = steam->client_opened;
+ if (!client_opened)
steam_set_lizard_mode(steam, lizard_mode);
+ mutex_unlock(&steam->mutex);
+
+ if (!client_opened)
ret = steam_input_register(steam);
- } else {
+ else
ret = 0;
- }
- mutex_unlock(&steam->mutex);
return ret;
}
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
{
struct steam_device *steam = hdev->driver_data;
+ unsigned long flags;
+ bool connected;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ connected = steam->connected;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
mutex_lock(&steam->mutex);
steam->client_opened = false;
+ if (connected)
+ steam_set_lizard_mode(steam, lizard_mode);
mutex_unlock(&steam->mutex);
- if (steam->connected) {
- steam_set_lizard_mode(steam, lizard_mode);
+ if (connected)
steam_input_register(steam);
- }
}
static int steam_client_ll_raw_request(struct hid_device *hdev,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 7710d9f957da..0187c9f8fc22 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
goto cleanup;
}
rc = usb_string(udev, 201, ver_ptr, ver_len);
- if (ver_ptr == NULL) {
- rc = -ENOMEM;
- goto cleanup;
- }
if (rc == -EPIPE) {
*ver_ptr = '\0';
} else if (rc < 0) {
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 90164fed08d3..4d1f24ee249c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_NO_RUNTIME_PM },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_BOGUS_IRQ },
+ { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
+ I2C_HID_QUIRK_NO_RUNTIME_PM },
{ 0, 0 }
};
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6f929bfa9fcd..d0f1dfe2bcbb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
config SENSORS_W83773G
tristate "Nuvoton W83773G"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the Nuvoton W83773G hardware
monitoring chip.
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e4f9f7ce92fa..f9abeeeead9e 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
};
static const u32 ntc_temp_config[] = {
- HWMON_T_INPUT, HWMON_T_TYPE,
+ HWMON_T_INPUT | HWMON_T_TYPE,
0
};
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index b91a80abf724..4679acb4918e 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
s++;
}
}
+
+ s = (sensors->power.num_sensors * 4) + 1;
} else {
for (i = 0; i < sensors->power.num_sensors; ++i) {
s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
show_power, NULL, 3, i);
attr++;
}
- }
- if (sensors->caps.num_sensors >= 1) {
s = sensors->power.num_sensors + 1;
+ }
+ if (sensors->caps.num_sensors >= 1) {
snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
0, 0);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f2c681971201..f8979abb9a19 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
Cannon Lake (PCH)
Cedar Fork (PCH)
Ice Lake (PCH)
+ Comet Lake (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c91e145ef5a5..679c6c41f64b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -71,6 +71,7 @@
* Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
* Cedar Fork (PCH) 0x18df 32 hard yes yes yes
* Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
+ * Comet Lake (PCH) 0x02a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -240,6 +241,7 @@
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
struct i801_mux_config {
char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
{ 0, }
};
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
+ case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 21cb088d6687..f7cdd2ab7f11 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3169,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+ int type, prot = 0;
size_t length;
- int prot = 0;
if (devid < entry->devid_start || devid > entry->devid_end)
continue;
+ type = IOMMU_RESV_DIRECT;
length = entry->address_end - entry->address_start;
if (entry->prot & IOMMU_PROT_IR)
prot |= IOMMU_READ;
if (entry->prot & IOMMU_PROT_IW)
prot |= IOMMU_WRITE;
+ if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+ /* Exclusion range */
+ type = IOMMU_RESV_RESERVED;
region = iommu_alloc_resv_region(entry->address_start,
- length, prot,
- IOMMU_RESV_DIRECT);
+ length, prot, type);
if (!region) {
dev_err(dev, "Out of memory allocating dm-regions\n");
return;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index f773792d77fd..1b1378619fc9 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
if (e == NULL)
return -ENOMEM;
+ if (m->flags & IVMD_FLAG_EXCL_RANGE)
+ init_exclusion_range(m);
+
switch (m->type) {
default:
kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
while (p < end) {
m = (struct ivmd_header *)p;
- if (m->flags & IVMD_FLAG_EXCL_RANGE)
- init_exclusion_range(m);
- else if (m->flags & IVMD_FLAG_UNITY_MAP)
+ if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
init_unity_map_range(m);
p += m->length;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index eae0741f72dc..87965e4d9647 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -374,6 +374,8 @@
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
+
/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB 24
#define IOMMU_CAP_NPCACHE 26
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f101afc315ab..9a8a8870e267 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -160,6 +160,14 @@
#define ARM_V7S_TCR_PD1 BIT(5)
+#ifdef CONFIG_ZONE_DMA32
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
+#else
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
+#endif
+
typedef u32 arm_v7s_iopte;
static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
void *table = NULL;
if (lvl == 1)
- table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+ table = (void *)__get_free_pages(
+ __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
else if (lvl == 2)
- table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+ table = kmem_cache_zalloc(data->l2_tables, gfp);
phys = virt_to_phys(table);
- if (phys != (arm_v7s_iopte)phys)
+ if (phys != (arm_v7s_iopte)phys) {
/* Doesn't fit in PTE */
+ dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
+ }
if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
ARM_V7S_TABLE_SIZE(2),
ARM_V7S_TABLE_SIZE(2),
- SLAB_CACHE_DMA, NULL);
+ ARM_V7S_TABLE_SLAB_FLAGS, NULL);
if (!data->l2_tables)
goto out_free_data;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33a982e33716..109de67d5d72 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
- dev_warn(dev,
- "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
- iommu_def_domain_type);
dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+ if (dom) {
+ dev_warn(dev,
+ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+ iommu_def_domain_type);
+ }
}
group->default_domain = dom;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7fea18b0c15d..7cb4d685a1f1 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int devid;
+ const struct of_device_id *of_id;
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
- devid = (int)(uintptr_t)of_match_device(
- of_pca9532_leds_match, &client->dev)->data;
+ of_id = of_match_device(of_pca9532_leds_match,
+ &client->dev);
+ if (unlikely(!of_id))
+ return -EINVAL;
+ devid = (int)(uintptr_t) of_id->data;
} else {
devid = id->driver_data;
}
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 3dd3ed46d473..136f86a1627d 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
trigger_data->net_dev = NULL;
}
- strncpy(trigger_data->device_name, buf, size);
+ memcpy(trigger_data->device_name, buf, size);
+ trigger_data->device_name[size] = 0;
if (size > 0 && trigger_data->device_name[size - 1] == '\n')
trigger_data->device_name[size - 1] = 0;
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
container_of(nb, struct led_netdev_data, notifier);
if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
- && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
- && evt != NETDEV_CHANGENAME)
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
return NOTIFY_DONE;
- if (strcmp(dev->name, trigger_data->device_name))
+ if (!(dev == trigger_data->net_dev ||
+ (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
return NOTIFY_DONE;
cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
dev_hold(dev);
trigger_data->net_dev = dev;
break;
- case NETDEV_CHANGENAME:
case NETDEV_UNREGISTER:
- if (trigger_data->net_dev) {
- dev_put(trigger_data->net_dev);
- trigger_data->net_dev = NULL;
- }
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
break;
case NETDEV_UP:
case NETDEV_CHANGE:
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0ce2d8dfc5f1..26ad6468d13a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
config MFD_SUN6I_PRCM
bool "Allwinner A31 PRCM controller"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
select MFD_CORE
help
Support for the PRCM (Power/Reset/Clock Management) unit available
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 69df27769c21..43ac71691fe4 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
static const struct mfd_cell sprd_pmic_devs[] = {
{
.name = "sc27xx-wdt",
- .of_compatible = "sprd,sc27xx-wdt",
+ .of_compatible = "sprd,sc2731-wdt",
}, {
.name = "sc27xx-rtc",
- .of_compatible = "sprd,sc27xx-rtc",
+ .of_compatible = "sprd,sc2731-rtc",
}, {
.name = "sc27xx-charger",
- .of_compatible = "sprd,sc27xx-charger",
+ .of_compatible = "sprd,sc2731-charger",
}, {
.name = "sc27xx-chg-timer",
- .of_compatible = "sprd,sc27xx-chg-timer",
+ .of_compatible = "sprd,sc2731-chg-timer",
}, {
.name = "sc27xx-fast-chg",
- .of_compatible = "sprd,sc27xx-fast-chg",
+ .of_compatible = "sprd,sc2731-fast-chg",
}, {
.name = "sc27xx-chg-wdt",
- .of_compatible = "sprd,sc27xx-chg-wdt",
+ .of_compatible = "sprd,sc2731-chg-wdt",
}, {
.name = "sc27xx-typec",
- .of_compatible = "sprd,sc27xx-typec",
+ .of_compatible = "sprd,sc2731-typec",
}, {
.name = "sc27xx-flash",
- .of_compatible = "sprd,sc27xx-flash",
+ .of_compatible = "sprd,sc2731-flash",
}, {
.name = "sc27xx-eic",
- .of_compatible = "sprd,sc27xx-eic",
+ .of_compatible = "sprd,sc2731-eic",
}, {
.name = "sc27xx-efuse",
- .of_compatible = "sprd,sc27xx-efuse",
+ .of_compatible = "sprd,sc2731-efuse",
}, {
.name = "sc27xx-thermal",
- .of_compatible = "sprd,sc27xx-thermal",
+ .of_compatible = "sprd,sc2731-thermal",
}, {
.name = "sc27xx-adc",
- .of_compatible = "sprd,sc27xx-adc",
+ .of_compatible = "sprd,sc2731-adc",
}, {
.name = "sc27xx-audio-codec",
- .of_compatible = "sprd,sc27xx-audio-codec",
+ .of_compatible = "sprd,sc2731-audio-codec",
}, {
.name = "sc27xx-regulator",
- .of_compatible = "sprd,sc27xx-regulator",
+ .of_compatible = "sprd,sc2731-regulator",
}, {
.name = "sc27xx-vibrator",
- .of_compatible = "sprd,sc27xx-vibrator",
+ .of_compatible = "sprd,sc2731-vibrator",
}, {
.name = "sc27xx-keypad-led",
- .of_compatible = "sprd,sc27xx-keypad-led",
+ .of_compatible = "sprd,sc2731-keypad-led",
}, {
.name = "sc27xx-bltc",
- .of_compatible = "sprd,sc27xx-bltc",
+ .of_compatible = "sprd,sc2731-bltc",
}, {
.name = "sc27xx-fgu",
- .of_compatible = "sprd,sc27xx-fgu",
+ .of_compatible = "sprd,sc2731-fgu",
}, {
.name = "sc27xx-7sreset",
- .of_compatible = "sprd,sc27xx-7sreset",
+ .of_compatible = "sprd,sc2731-7sreset",
}, {
.name = "sc27xx-poweroff",
- .of_compatible = "sprd,sc27xx-poweroff",
+ .of_compatible = "sprd,sc2731-poweroff",
}, {
.name = "sc27xx-syscon",
- .of_compatible = "sprd,sc27xx-syscon",
+ .of_compatible = "sprd,sc2731-syscon",
},
};
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 299016bc46d9..104477b512a2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1245,6 +1245,28 @@ free:
return status;
}
+static int __maybe_unused twl_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq)
+ disable_irq(client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused twl_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq)
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
+
static const struct i2c_device_id twl_ids[] = {
{ "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
{ "twl5030", 0 }, /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
/* One Client Driver , 4 Clients */
static struct i2c_driver twl_driver = {
.driver.name = DRIVER_NAME,
+ .driver.pm = &twl_dev_pm_ops,
.id_table = twl_ids,
.probe = twl_probe,
.remove = twl_remove,
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 3525236ed8d9..19c84214a7ea 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
/* We also need to update CI for internal queues */
if (cs->submitted) {
+ int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+ WARN_ONCE((cs_cnt < 0),
+ "hl%d: error in CS active cnt %d\n",
+ hdev->id, cs_cnt);
+
hl_int_hw_queue_update_ci(cs);
spin_lock(&hdev->hw_queues_mirror_lock);
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a53c12aff6ad..974a87789bd8 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
enum vm_type_t *vm_type;
bool once = true;
+ u64 j;
int i;
if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
} else {
phys_pg_pack = hnode->ptr;
seq_printf(s,
- " 0x%-14llx %-10u %-4u\n",
+ " 0x%-14llx %-10llu %-4u\n",
hnode->vaddr, phys_pg_pack->total_size,
phys_pg_pack->handle);
}
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
phys_pg_pack->page_size);
seq_puts(s, " physical address\n");
seq_puts(s, "---------------------\n");
- for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+ for (j = 0 ; j < phys_pg_pack->npages ; j++) {
seq_printf(s, " 0x%-14llx\n",
- phys_pg_pack->pages[i]);
+ phys_pg_pack->pages[j]);
}
}
spin_unlock(&vm->idr_lock);
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index de46aa6ed154..77d51be66c7e 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -11,6 +11,8 @@
#include <linux/sched/signal.h>
#include <linux/hwmon.h>
+#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10)
+
bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
{
if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
spin_lock_init(&hdev->hw_queues_mirror_lock);
atomic_set(&hdev->in_reset, 0);
atomic_set(&hdev->fd_open_cnt, 0);
+ atomic_set(&hdev->cs_active_cnt, 0);
return 0;
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
pci_save_state(hdev->pdev);
+ /* Block future CS/VM/JOB completion operations */
+ rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+ if (rc) {
+ dev_err(hdev->dev, "Can't suspend while in reset\n");
+ return -EIO;
+ }
+
+ /* This blocks all other stuff that is not blocked by in_reset */
+ hdev->disabled = true;
+
+ /*
+ * Flush anyone that is inside the critical section of enqueue
+ * jobs to the H/W
+ */
+ hdev->asic_funcs->hw_queues_lock(hdev);
+ hdev->asic_funcs->hw_queues_unlock(hdev);
+
+ /* Flush processes that are sending message to CPU */
+ mutex_lock(&hdev->send_cpu_message_lock);
+ mutex_unlock(&hdev->send_cpu_message_lock);
+
rc = hdev->asic_funcs->suspend(hdev);
if (rc)
dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
pci_set_power_state(hdev->pdev, PCI_D0);
pci_restore_state(hdev->pdev);
- rc = pci_enable_device(hdev->pdev);
+ rc = pci_enable_device_mem(hdev->pdev);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI device in resume\n");
return rc;
}
+ pci_set_master(hdev->pdev);
+
rc = hdev->asic_funcs->resume(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed to enable PCI access from device CPU\n");
- return rc;
+ dev_err(hdev->dev, "Failed to resume device after suspend\n");
+ goto disable_device;
+ }
+
+
+ hdev->disabled = false;
+ atomic_set(&hdev->in_reset, 0);
+
+ rc = hl_device_reset(hdev, true, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to reset device during resume\n");
+ goto disable_device;
}
return 0;
+
+disable_device:
+ pci_clear_master(hdev->pdev);
+ pci_disable_device(hdev->pdev);
+
+ return rc;
}
static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
struct hl_device_reset_work *device_reset_work =
container_of(work, struct hl_device_reset_work, reset_work);
struct hl_device *hdev = device_reset_work->hdev;
- u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+ u16 pending_total, pending_cnt;
struct task_struct *task = NULL;
+ if (hdev->pldm)
+ pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+ else
+ pending_total = HL_PENDING_RESET_PER_SEC;
+
+ pending_cnt = pending_total;
+
/* Flush all processes that are inside hl_open */
mutex_lock(&hdev->fd_open_cnt_lock);
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
}
}
+ pending_cnt = pending_total;
+
+ while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+ pending_cnt--;
+
+ ssleep(1);
+ }
+
+ if (atomic_read(&hdev->fd_open_cnt))
+ dev_crit(hdev->dev,
+ "Going to hard reset with open user contexts\n");
+
mutex_unlock(&hdev->fd_open_cnt_lock);
hl_device_reset(hdev, true, true);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 238dd57c541b..ea979ebd62fb 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
return retval;
}
-static void goya_resume_external_queues(struct hl_device *hdev)
-{
- WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
- WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
-}
-
/*
* goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
*
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
return retval;
}
-static void goya_resume_internal_queues(struct hl_device *hdev)
-{
- WREG32(mmMME_QM_GLBL_CFG1, 0);
- WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC0_QM_GLBL_CFG1, 0);
- WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC1_QM_GLBL_CFG1, 0);
- WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC2_QM_GLBL_CFG1, 0);
- WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC3_QM_GLBL_CFG1, 0);
- WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC4_QM_GLBL_CFG1, 0);
- WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC5_QM_GLBL_CFG1, 0);
- WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC6_QM_GLBL_CFG1, 0);
- WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
-
- WREG32(mmTPC7_QM_GLBL_CFG1, 0);
- WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
-}
-
static void goya_dma_stall(struct hl_device *hdev)
{
WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
{
int rc;
- rc = goya_stop_internal_queues(hdev);
-
- if (rc) {
- dev_err(hdev->dev, "failed to stop internal queues\n");
- return rc;
- }
-
- rc = goya_stop_external_queues(hdev);
-
- if (rc) {
- dev_err(hdev->dev, "failed to stop external queues\n");
- return rc;
- }
-
rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
int goya_resume(struct hl_device *hdev)
{
- int rc;
-
- goya_resume_external_queues(hdev);
- goya_resume_internal_queues(hdev);
-
- rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
- if (rc)
- dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
- return rc;
+ return goya_init_iatu(hdev);
}
static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
*dma_handle = hdev->asic_prop.sram_base_address;
- base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+ base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
switch (queue_id) {
case GOYA_QUEUE_ID_MME:
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a7c95e9f9b9a..a8ee52c880cd 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
* struct hl_vm_phys_pg_pack - physical page pack.
* @vm_type: describes the type of the virtual area descriptor.
* @pages: the physical page array.
+ * @npages: num physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
* @mapping_cnt: number of shared mappings.
* @asid: the context related to this list.
- * @npages: num physical pages in the pack.
* @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
* @flags: HL_MEM_* flags related to this list.
* @handle: the provided handle related to this list.
* @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
struct hl_vm_phys_pg_pack {
enum vm_type_t vm_type; /* must be first */
u64 *pages;
+ u64 npages;
+ u64 total_size;
atomic_t mapping_cnt;
u32 asid;
- u32 npages;
u32 page_size;
- u32 total_size;
u32 flags;
u32 handle;
u32 offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
* @cb_pool_lock: protects the CB pool.
* @user_ctx: current user context executing.
* @dram_used_mem: current DRAM memory consumption.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, KMD will restore this
* value and update the F/W after the re-initialization
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ * means already in H/W queues)
* @major: habanalabs KMD major.
* @high_pll: high PLL profile frequency.
* @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
struct hl_ctx *user_ctx;
atomic64_t dram_used_mem;
+ u64 timeout_jiffies;
+ u64 max_power;
atomic_t in_reset;
atomic_t curr_pll_profile;
atomic_t fd_open_cnt;
- u64 timeout_jiffies;
- u64 max_power;
+ atomic_t cs_active_cnt;
u32 major;
u32 high_pll;
u32 soft_reset_cnt;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 67bece26417c..ef3bb6951360 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
spin_unlock(&hdev->hw_queues_mirror_lock);
}
- list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+ atomic_inc(&hdev->cs_active_cnt);
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
if (job->ext_queue)
ext_hw_queue_schedule_job(job);
else
int_hw_queue_schedule_job(job);
- }
cs->submitted = true;
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 3a12fd1a5274..ce1fda40a8b8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_device *hdev = ctx->hdev;
struct hl_vm *vm = &hdev->vm;
struct hl_vm_phys_pg_pack *phys_pg_pack;
- u64 paddr = 0;
- u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
- int handle, rc, i;
+ u64 paddr = 0, total_size, num_pgs, i;
+ u32 num_curr_pgs, page_size, page_shift;
+ int handle, rc;
bool contiguous;
num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
- "failed to allocate %u huge contiguous pages\n",
+ "failed to allocate %llu huge contiguous pages\n",
num_pgs);
return -ENOMEM;
}
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
phys_pg_pack->flags = args->flags;
phys_pg_pack->contiguous = contiguous;
- phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+ phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
if (!phys_pg_pack->pages) {
rc = -ENOMEM;
goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
page_size);
- kfree(phys_pg_pack->pages);
+ kvfree(phys_pg_pack->pages);
pages_arr_err:
kfree(phys_pg_pack);
pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
- int i;
+ u64 i;
if (!phys_pg_pack->created_from_userptr) {
if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
}
}
- kfree(phys_pg_pack->pages);
+ kvfree(phys_pg_pack->pages);
kfree(phys_pg_pack);
}
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
* - Return the start address of the virtual block
*/
static u64 get_va_block(struct hl_device *hdev,
- struct hl_va_range *va_range, u32 size, u64 hint_addr,
+ struct hl_va_range *va_range, u64 size, u64 hint_addr,
bool is_userptr)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
}
if (!new_va_block) {
- dev_err(hdev->dev, "no available va block for size %u\n", size);
+ dev_err(hdev->dev, "no available va block for size %llu\n",
+ size);
goto out;
}
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
- u64 page_mask;
- u32 npages, total_npages, page_size = PAGE_SIZE;
+ u64 page_mask, total_npages;
+ u32 npages, page_size = PAGE_SIZE;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
page_mask = ~(((u64) page_size) - 1);
- phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+ phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+ GFP_KERNEL);
if (!phys_pg_pack->pages) {
rc = -ENOMEM;
goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
- u64 next_vaddr = vaddr, paddr;
+ u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
u32 page_size = phys_pg_pack->page_size;
- int i, rc = 0, mapped_pg_cnt = 0;
+ int rc = 0;
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
if (rc) {
dev_err(hdev->dev,
- "map failed for handle %u, npages: %d, mapped: %d",
+ "map failed for handle %u, npages: %llu, mapped: %llu",
phys_pg_pack->handle, phys_pg_pack->npages,
mapped_pg_cnt);
goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
enum vm_type_t *vm_type;
- u64 next_vaddr;
+ u64 next_vaddr, i;
u32 page_size;
bool is_userptr;
- int i, rc;
+ int rc;
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 2f2e99cb2743..3a5a2cec8305 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -832,7 +832,7 @@ err:
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
- u64 real_virt_addr;
+ u64 real_virt_addr, real_phys_addr;
u32 real_page_size, npages;
int i, rc, mapped_cnt = 0;
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
npages = page_size / real_page_size;
real_virt_addr = virt_addr;
+ real_phys_addr = phys_addr;
for (i = 0 ; i < npages ; i++) {
- rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+ rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
real_page_size);
if (rc)
goto err;
real_virt_addr += real_page_size;
+ real_phys_addr += real_page_size;
mapped_cnt++;
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2ffef0..4985268e2273 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+ return sprintf(buf, "%*phC\n",
+ slave->dev->addr_len,
+ slave->perm_hwaddr);
}
static SLAVE_ATTR_RO(perm_hwaddr);
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dce84a2a65c7..c44b2822e4dd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
lane = mv88e6390x_serdes_get_lane(chip, port);
- if (lane < 0)
+ if (lane < 0 && lane != -ENODEV)
return lane;
- if (chip->ports[port].serdes_irq) {
- err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (lane >= 0) {
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390x_serdes_power(chip, port, false);
if (err)
return err;
}
- err = mv88e6390x_serdes_power(chip, port, false);
- if (err)
- return err;
+ chip->ports[port].cmode = 0;
if (cmode) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
+ chip->ports[port].cmode = cmode;
+
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return lane;
+
err = mv88e6390x_serdes_power(chip, port, true);
if (err)
return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
}
}
- chip->ports[port].cmode = cmode;
-
return 0;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aa2be4807191..28eac9056211 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
union nic_mbx mbx = {};
- cancel_delayed_work_sync(&nic->link_change_work);
-
/* wait till all queued set_rx_mode tasks completes */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq) {
+ cancel_delayed_work_sync(&nic->link_change_work);
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+ }
mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
/* wait till all queued set_rx_mode tasks completes if any */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq)
+ drain_workqueue(nic->nicvf_rx_mode_wq);
netif_carrier_off(netdev);
@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev)
/* Send VF config done msg to PF */
nicvf_send_cfg_done(nic);
- INIT_DELAYED_WORK(&nic->link_change_work,
- nicvf_link_status_check_task);
- queue_delayed_work(nic->nicvf_rx_mode_wq,
- &nic->link_change_work, 0);
+ if (nic->nicvf_rx_mode_wq) {
+ INIT_DELAYED_WORK(&nic->link_change_work,
+ nicvf_link_status_check_task);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 0);
+ }
return 0;
cleanup:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 5b4d3badcb73..e246f9733bb8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
/* Check if page can be recycled */
if (page) {
ref_count = page_ref_count(page);
- /* Check if this page has been used once i.e 'put_page'
- * called after packet transmission i.e internal ref_count
- * and page's ref_count are equal i.e page can be recycled.
+ /* This page can be recycled if internal ref_count and page's
+ * ref_count are equal, indicating that the page has been used
+ * once for packet transmission. For non-XDP mode, internal
+ * ref_count is always '1'.
*/
- if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
- pgcache->ref_count--;
- else
- page = NULL;
-
- /* In non-XDP mode, page's ref_count needs to be '1' for it
- * to be recycled.
- */
- if (!rbdr->is_xdp && (ref_count != 1))
+ if (rbdr->is_xdp) {
+ if (ref_count == pgcache->ref_count)
+ pgcache->ref_count--;
+ else
+ page = NULL;
+ } else if (ref_count != 1) {
page = NULL;
+ }
}
if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
while (head < rbdr->pgcnt) {
pgcache = &rbdr->pgcache[head];
if (pgcache->page && page_ref_count(pgcache->page) != 0) {
- if (!rbdr->is_xdp) {
- put_page(pgcache->page);
- continue;
+ if (rbdr->is_xdp) {
+ page_ref_sub(pgcache->page,
+ pgcache->ref_count - 1);
}
- page_ref_sub(pgcache->page, pgcache->ref_count - 1);
put_page(pgcache->page);
}
head++;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
ppmax = max;
/* pool size must be multiple of unsigned long */
- bmap = BITS_TO_LONGS(ppmax);
+ bmap = ppmax / BITS_PER_TYPE(unsigned long);
+ if (!bmap)
+ return NULL;
+
ppmax = (bmap * sizeof(unsigned long)) << 3;
alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
if (reserve_factor) {
ppmax_pool = ppmax / reserve_factor;
pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+ if (!pool) {
+ ppmax_pool = 0;
+ reserve_factor = 0;
+ }
pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
ndev->name, ppmax, ppmax_pool, pool_index_max);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8ee7b1..c7fa97a7e1f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ out_buffer_fail:
/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
- hnae_free_buffers(ring);
dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
ring->desc_num * sizeof(ring->desc[0]),
ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
+ if (is_rx_ring(ring))
+ hnae_free_buffers(ring);
+
hnae_free_desc(ring);
kfree(ring->desc_cb);
ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 08a750fb60c4..d6fb83437230 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
};
struct hnae_queue {
- void __iomem *io_base;
+ u8 __iomem *io_base;
phys_addr_t phy_base;
struct hnae_ae_dev *dev; /* the device who use this queue */
struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a97228c93831..6c0507921623 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
static void hns_mac_param_get(struct mac_params *param,
struct hns_mac_cb *mac_cb)
{
- param->vaddr = (void *)mac_cb->vaddr;
+ param->vaddr = mac_cb->vaddr;
param->mac_mode = hns_get_enet_interface(mac_cb);
ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
param->mac_id = mac_cb->mac_id;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index fbc75341bef7..22589799f1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -187,7 +187,7 @@ struct mac_statistics {
/*mac para struct ,mac get param from nic or dsaf when initialize*/
struct mac_params {
char addr[ETH_ALEN];
- void *vaddr; /*virtual address*/
+ u8 __iomem *vaddr; /*virtual address*/
struct device *dev;
u8 mac_id;
/**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
enum mac_mode mac_mode;
u8 mac_id;
struct hns_mac_cb *mac_cb;
- void __iomem *io_base;
+ u8 __iomem *io_base;
unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
unsigned int virt_dev_num;
struct device *dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac55db065f16..61eea6ac846f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
DSAF_TBL_TCAM_KEY_PORT_S, port);
-
- mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
}
/**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
/* default config dvc to 0 */
mac_data.tbl_ucast_dvc = 0;
mac_data.tbl_ucast_out_port = mac_entry->port_num;
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
0xff,
mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
dsaf_dev->ae_dev.name, mac_key.high.val,
mac_key.low.val, entry_index);
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
/* config mc entry with mask */
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* config key mask */
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
soft_mac_entry += entry_index;
soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
} else { /* not zero, just del port, update */
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
&tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
+static int hns_dsaf_get_port_id(u8 port)
+{
+ if (port < DSAF_SERVICE_NW_NUM)
+ return port;
+
+ if (port >= DSAF_BASE_INNER_PORT_NUM)
+ return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+ return -EINVAL;
+}
+
static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
memset(&temp_key, 0x0, sizeof(temp_key));
mask_entry.addr[0] = 0x01;
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
- port, mask_entry.addr);
+ 0xf, mask_entry.addr);
tbl_tcam_mcast.tbl_mcast_item_vld = 1;
tbl_tcam_mcast.tbl_mcast_old_en = 0;
- if (port < DSAF_SERVICE_NW_NUM) {
- mskid = port;
- } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
- mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
- } else {
+ /* set MAC port to handle multicast */
+ mskid = hns_dsaf_get_port_id(port);
+ if (mskid == -EINVAL) {
dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port,
mask_key.high.val, mask_key.low.val);
return;
}
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+ /* set pool bit map to handle multicast */
+ mskid = hns_dsaf_get_port_id(port_num);
+ if (mskid == -EINVAL) {
+ dev_err(dsaf_dev->dev,
+ "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port_num,
+ mask_key.high.val, mask_key.low.val);
+ return;
+ }
dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
+
memcpy(&temp_key, &mask_key, sizeof(mask_key));
hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
(struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0e1cd99831a6..76cc8887e1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 16294cd3c954..19b94879691f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
} else {
- u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ u8 __iomem *base_addr = mac_cb->serdes_vaddr +
(mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 3d07c8a7639d..17c019106e6e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
}
}
-static void __iomem *
+static u8 __iomem *
hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
{
return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
dsaf_dev->ppe_common[comm_index] = NULL;
}
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
- int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+ int ppe_idx)
{
return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index f670e63a5a01..110c6e8222c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
struct hns_ppe_hw_stats hw_stats;
u8 index; /* index in a ppe common device */
- void __iomem *io_base;
+ u8 __iomem *io_base;
int virq;
u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
struct ppe_common_cb {
struct device *dev;
struct dsaf_device *dsaf_dev;
- void __iomem *io_base;
+ u8 __iomem *io_base;
enum ppe_common_mode ppe_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6bf346c11b25..ac3518ca4d7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
} else {
ring = &q->tx_ring;
- ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+ ring->io_base = ring_pair_cb->q.io_base +
HNS_RCB_TX_REG_OFFSET;
irq_idx = HNS_RCB_IRQ_IDX_TX;
mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
}
}
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b9733b0b8482..b9e7f11f0896 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1018,7 +1018,7 @@
#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
}
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
#define dsaf_set_bit(origin, shift, val) \
dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift, u32 val)
{
u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
#define dsaf_get_bit(origin, shift) \
dsaf_get_field((origin), (1ull << (shift)), (shift))
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift)
{
u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
#define dsaf_write_b(addr, data)\
- writeb((data), (__iomem unsigned char *)(addr))
+ writeb((data), (__iomem u8 *)(addr))
#define dsaf_read_b(addr)\
- readb((__iomem unsigned char *)(addr))
+ readb((__iomem u8 *)(addr))
#define hns_mac_reg_read64(drv, offset) \
- readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+ readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
#endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba4316910dea..a60f207768fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
- dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+ dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index e37a0ca0db89..297b95c1b3c1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
#define SERVICE_TIMER_HZ (1 * HZ)
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
- ring->stats.tx_pkts++;
- ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
/* issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
+ /* update tx ring statistics. */
+ ring->stats.tx_pkts += pkts;
+ ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ring);
@@ -2151,7 +2149,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2164,7 +2162,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index fffe8c1c45d3..0fb61d440d3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd35845..6193f8fa7cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc251f32..8b8a7d00e8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
};
struct hns_mdio_device {
- void *vbase; /* mdio reg base address */
+ u8 __iomem *vbase; /* mdio reg base address */
struct regmap *subctrl_vbase;
struct hns_mdio_sc_reg sc_reg;
};
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
#define MDIO_SC_CLK_ST 0x531C
#define MDIO_SC_RESET_ST 0x5A1C
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- writel_relaxed(value, reg_addr + reg);
+ writel_relaxed(value, base + reg);
}
#define MDIO_WRITE_REG(a, reg, value) \
mdio_write_reg((a)->vbase, (reg), (value))
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- return readl_relaxed(reg_addr + reg);
+ return readl_relaxed(base + reg);
}
#define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
u32 val)
{
u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
{
u32 origin;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 20c4e0835ba8..1de691e76b86 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1886,6 +1886,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_completion(&adapter->init_done);
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -4692,7 +4693,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
- init_completion(&adapter->init_done);
+ reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4747,7 +4748,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
- init_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4826,6 +4826,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ init_completion(&adapter->init_done);
adapter->resetting = false;
adapter->mac_change_pending = false;
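
The ibmvnic changes follow the standard lifecycle rule for struct completion: init_completion() exactly once when the object is created (now in ibmvnic_probe()), and reinit_completion() before every subsequent wait, so a reset path never re-initializes a completion that a concurrent waiter may still be sleeping on. A hedged sketch of that pattern with a hypothetical adapter type:

	#include <linux/completion.h>

	struct example_adapter {
		struct completion init_done;
	};

	static void example_probe(struct example_adapter *a)
	{
		init_completion(&a->init_done);		/* once, at probe time */
	}

	static int example_reinit(struct example_adapter *a, unsigned long timeout)
	{
		reinit_completion(&a->init_done);	/* rearm before each wait */
		/* ...kick off the request whose handler calls complete(&a->init_done)... */
		if (!wait_for_completion_timeout(&a->init_done, timeout))
			return -ETIMEDOUT;
		return 0;
	}
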
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e2fa112bed9a..2325cee76211 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
/* create driver workqueue */
fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
fm10k_driver_name);
+ if (!fm10k_workqueue)
+ return -ENOMEM;
fm10k_dbg_init();
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d684998ba2b0..d3cc3427caad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -790,6 +790,8 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
+
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
return !!vsi->xdp_prog;
}
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
- bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
- int qid = ring->queue_index;
-
- if (ring_is_xdp(ring))
- qid -= ring->vsi->alloc_queue_pairs;
-
- if (!xdp_on)
- return NULL;
-
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4c885801fa26..7874d0ec7fb0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
/* only magic packet is supported */
- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
- | (wol->wolopts != WAKE_FILTER))
+ if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
/* is this a new value? */
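
The removed test mixes a bitwise OR into a boolean expression: wolopts cannot equal WAKE_MAGIC and WAKE_FILTER at the same time, so one of the two inequalities is always true and every non-zero request, including a plain magic-packet one, was rejected. Masking with ~WAKE_MAGIC accepts exactly WAKE_MAGIC or nothing. A small stand-alone comparison of the two predicates (WAKE_* values as in linux/ethtool.h):

	#include <stdio.h>

	#define WAKE_MAGIC  0x20
	#define WAKE_FILTER 0x80

	static int old_reject(unsigned int w)
	{
		return w && (w != WAKE_MAGIC) | (w != WAKE_FILTER);	/* buggy form */
	}

	static int new_reject(unsigned int w)
	{
		return w & ~WAKE_MAGIC;		/* anything beyond magic packet */
	}

	int main(void)
	{
		unsigned int cases[] = { 0, WAKE_MAGIC, WAKE_FILTER };

		for (int i = 0; i < 3; i++)
			printf("wolopts=0x%02x old_reject=%d new_reject=%d\n",
			       cases[i], old_reject(cases[i]), !!new_reject(cases[i]));
		return 0;
	}

For wolopts == WAKE_MAGIC the old predicate still evaluates to 1, which is exactly the regression the hunk fixes.
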
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da62218eb70a..b1c265012c8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC UMEM if XDP and ZC are enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+ bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+ int qid = ring->queue_index;
+
+ if (ring_is_xdp(ring))
+ qid -= ring->vsi->alloc_queue_pairs;
+
+ if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
+/**
* i40e_configure_tx_ring - Configure a transmit ring context and rest
* @ring: The Tx ring to configure
*
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
+ if (type == I40E_VSI_MAIN) {
+ vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_rings;
+ }
+
ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
goto unlock_pf;
err_rings:
+ bitmap_free(vsi->af_xdp_zc_qps);
pf->next_vsi = i - 1;
kfree(vsi);
unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ bitmap_free(vsi->af_xdp_zc_qps);
i40e_vsi_free_arrays(vsi, true);
i40e_clear_rss_config_user(vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5fb4353c742b..31575c0bb884 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now;
+ struct timespec64 now, then;
+ then = ns_to_timespec64(delta);
mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now, NULL);
- timespec64_add_ns(&now, delta);
+ now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
mutex_unlock(&pf->tmreg_lock);
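
timespec64_add_ns() takes its adjustment as an unsigned u64, so a negative delta coming from the PTP core would be reinterpreted as an enormous forward step; converting the delta with ns_to_timespec64() and adding with timespec64_add() preserves the sign. The underlying wraparound is easy to see in plain C:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int64_t delta = -1000000;		/* a -1 ms adjustment */
		uint64_t as_u64 = (uint64_t)delta;	/* what a u64 ns parameter receives */

		printf("signed delta : %lld ns\n", (long long)delta);
		printf("seen as u64  : %llu ns (roughly 584 years forward)\n",
		       (unsigned long long)as_u64);
		return 0;
	}
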
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b5c182e688e3..1b17486543ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
return err;
}
+ clear_bit(qid, vsi->af_xdp_zc_qps);
i40e_xsk_umem_dma_unmap(vsi, umem);
if (if_running) {
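
Together with the i40e.h and i40e_main.c hunks, this gives the driver an explicit record of which queue pairs have AF_XDP zero-copy enabled: the bit is set on umem enable, cleared on disable, and i40e_xsk_umem() now also requires test_bit() to succeed instead of inferring state from the presence of a UMEM alone. The bookkeeping is the stock bitmap API; a hedged sketch with hypothetical names:

	#include <linux/bitmap.h>
	#include <linux/slab.h>

	struct example_vsi {
		unsigned long *zc_qps;		/* one bit per queue pair */
		unsigned int num_qps;
	};

	static int example_vsi_init(struct example_vsi *vsi)
	{
		vsi->zc_qps = bitmap_zalloc(vsi->num_qps, GFP_KERNEL);
		return vsi->zc_qps ? 0 : -ENOMEM;
	}

	static void example_zc_set(struct example_vsi *vsi, u16 qid, bool on)
	{
		if (on)
			set_bit(qid, vsi->zc_qps);
		else
			clear_bit(qid, vsi->zc_qps);
	}

	static bool example_zc_enabled(struct example_vsi *vsi, u16 qid)
	{
		return test_bit(qid, vsi->zc_qps);
	}

	static void example_vsi_fini(struct example_vsi *vsi)
	{
		bitmap_free(vsi->zc_qps);	/* kfree-based, NULL-safe */
	}
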
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 01fcfc6f3415..d2e2c50ce257 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 32d61d5a2706..acbb5b4f333d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8743,9 +8743,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
+ bool wake;
rtnl_lock();
netif_device_detach(netdev);
@@ -8758,14 +8756,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- if (!runtime) {
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
- }
-#endif
-
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -8782,10 +8772,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
ctrl = rd32(E1000_CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
wr32(E1000_CTRL, ctrl);
@@ -8799,12 +8785,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
wr32(E1000_WUFC, 0);
}
- *enable_wake = wufc || adapter->en_mng_pt;
- if (!*enable_wake)
+ wake = wufc || adapter->en_mng_pt;
+ if (!wake)
igb_power_down_link(adapter);
else
igb_power_up_link(adapter);
+ if (enable_wake)
+ *enable_wake = wake;
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -8847,22 +8836,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
static int __maybe_unused igb_suspend(struct device *dev)
{
- int retval;
- bool wake;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- retval = __igb_shutdown(pdev, &wake, 0);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
static int __maybe_unused igb_resume(struct device *dev)
@@ -8933,22 +8907,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __igb_shutdown(pdev, &wake, 1);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
static int __maybe_unused igb_runtime_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index cc4907f9ff02..2fb97967961c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
struct mii_bus *bus;
+ int err = -ENODEV;
- adapter->mii_bus = devm_mdiobus_alloc(dev);
- if (!adapter->mii_bus)
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
return -ENOMEM;
- bus = adapter->mii_bus;
-
switch (hw->device_id) {
/* C3000 SoCs */
case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*/
hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
- return mdiobus_register(bus);
+ err = mdiobus_register(bus);
+ if (!err) {
+ adapter->mii_bus = bus;
+ return 0;
+ }
ixgbe_no_mii_bus:
devm_mdiobus_free(dev, bus);
- adapter->mii_bus = NULL;
- return -ENODEV;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 122927f3a600..d5e5afbdca6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
if (!eproto)
return -EINVAL;
- if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
- return -EOPNOTSUPP;
-
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index b0ce68feb0f3..633b117eb13e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -122,7 +122,9 @@ out:
return err;
}
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
+ * minimum speed value is 40Gbps
+ */
static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
{
u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;
err = mlx5e_port_linkspeed(priv->mdev, &speed);
- if (err) {
- mlx5_core_warn(priv->mdev, "cannot get port speed\n");
- return 0;
- }
+ if (err)
+ speed = SPEED_40000;
+ speed = max_t(u32, speed, SPEED_40000);
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
}
static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
- u32 xoff, unsigned int mtu)
+ u32 xoff, unsigned int max_mtu)
{
int i;
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}
if (port_buffer->buffer[i].size <
- (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+ (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
return -ENOMEM;
port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
- port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
+ port_buffer->buffer[i].xon =
+ port_buffer->buffer[i].xoff - max_mtu;
}
return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
/**
* update_buffer_lossy - Update buffer configuration based on pfc
- * @mtu: device's MTU
+ * @max_mtu: netdev's max_mtu
* @pfc_en: <input> current pfc configuration
* @buffer: <input> current prio to buffer mapping
* @xoff: <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* @return: 0 if no error,
* sets change to true if buffer configuration was modified.
*/
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff,
struct mlx5e_port_buffer *port_buffer,
bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
}
if (changed) {
- err = update_xoff_threshold(port_buffer, xoff, mtu);
+ err = update_xoff_threshold(port_buffer, xoff, max_mtu);
if (err)
return err;
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
return 0;
}
+#define MINIMUM_MAX_MTU 9216
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
bool update_prio2buffer = false;
u8 buffer[MLX5E_MAX_PRIORITY];
bool update_buffer = false;
+ unsigned int max_mtu;
u32 total_used = 0;
u8 curr_pfc_en;
int err;
int i;
mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+ max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
err = mlx5e_port_query_buffer(priv, &port_buffer);
if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
&port_buffer, &update_buffer);
if (err)
return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
- &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+ xoff, &port_buffer, &update_buffer);
if (err)
return err;
}
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return -EINVAL;
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
/* Need to update buffer configuration if xoff value is changed */
if (!update_buffer && xoff != priv->dcbx.xoff) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
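
Two knobs change in the port-buffer hunks: the speed used for the xoff calculation now falls back to, and is floored at, 40 Gb/s when the link-speed query fails, and the headroom/xon checks use the netdev's max_mtu (itself floored at 9216 by MINIMUM_MAX_MTU) instead of the current MTU, so shrinking the MTU can no longer under-provision the buffer. Plugging illustrative numbers into the integer formula from calculate_xoff():

	#include <stdio.h>

	int main(void)
	{
		unsigned int speed = 40000;	/* Mb/s, already clamped to >= 40000 */
		unsigned int cable_len = 7;	/* metres, illustrative */
		unsigned int mtu = 9216;	/* bytes, illustrative */
		unsigned int xoff;

		/* same scaled-integer arithmetic as the driver:
		 * (301 + 2.16 * len[m]) * speed[Gb/s] + 2.72 * MTU[B]
		 */
		xoff = (301 + 216 * cable_len / 100) * speed / 1000 + 272 * mtu / 100;

		printf("xoff = %u bytes\n", xoff);	/* 37707 for these inputs */
		return 0;
	}
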
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3078491cc0d0..1539cf3de5dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
if (err)
return err;
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return 0;
}
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir)
{
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
mlx5_core_destroy_tir(mdev, tir->tirn);
list_del(&tir->list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
}
INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+ mutex_init(&mdev->mlx5e_res.td.list_lock);
return 0;
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
- int err = -ENOMEM;
+ int err = 0;
u32 tirn = 0;
int inlen;
void *in;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ if (!in) {
+ err = -ENOMEM;
goto out;
+ }
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return err;
}
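
The en_common.c hunks put every access to mlx5e_res.td.tirs_list behind the new list_lock, so mlx5e_refresh_tirs() can no longer iterate the list while another context is creating or destroying a TIR. The shape is an ordinary mutex-protected list; a hedged sketch with hypothetical names:

	#include <linux/list.h>
	#include <linux/mutex.h>

	struct example_res {
		struct mutex list_lock;		/* protects tirs_list */
		struct list_head tirs_list;
	};

	struct example_tir {
		struct list_head list;
	};

	static void example_res_init(struct example_res *res)
	{
		mutex_init(&res->list_lock);
		INIT_LIST_HEAD(&res->tirs_list);
	}

	static void example_tir_add(struct example_res *res, struct example_tir *tir)
	{
		mutex_lock(&res->list_lock);
		list_add(&tir->list, &res->tirs_list);
		mutex_unlock(&res->list_lock);
	}

	static void example_refresh(struct example_res *res)
	{
		struct example_tir *tir;

		mutex_lock(&res->list_lock);
		list_for_each_entry(tir, &res->tirs_list, list)
			/* modify each TIR while the list cannot change */;
		mutex_unlock(&res->list_lock);
	}
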
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a0987cc5fe4a..5efce4a3ff79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
- unsigned long *advertising_modes,
- u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+ u32 eth_proto_cap, bool ext)
{
unsigned long proto_cap = eth_proto_cap;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+ table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+ max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+ ARRAY_SIZE(ptys2legacy_ethtool_table);
+
for_each_set_bit(proto, &proto_cap, max_size)
bitmap_or(advertising_modes, advertising_modes,
table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
- u8 tx_pause, u8 rx_pause,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+ struct ethtool_link_ksettings *link_ksettings,
+ bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+ ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+ bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
- ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+ ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
u8 an_disable_admin;
u8 an_status;
u8 connector_type;
+ bool admin_ext;
bool ext;
int err;
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
eth_proto_capability);
eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_admin);
+ /* Fields: eth_proto_admin and ext_eth_proto_admin are
+ * mutually exclusive. Hence try reading legacy advertising
+ * when extended advertising is zero.
+ * admin_ext indicates how eth_proto_admin should be
+ * interpreted
+ */
+ admin_ext = ext;
+ if (ext && !eth_proto_admin) {
+ eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+ eth_proto_admin);
+ admin_ext = false;
+ }
+
eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_oper);
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
get_supported(mdev, eth_proto_cap, link_ksettings);
- get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+ get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+ admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
- ext_requested = (link_ksettings->link_modes.advertising[0] >
- MLX5E_PTYS_EXT);
+ ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+ MLX5E_PTYS_EXT ||
+ link_ksettings->link_modes.advertising[1]);
ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
- /*when ptys_extended_ethernet is set legacy link modes are deprecated */
- if (ext_requested != ext_supported)
- return -EPROTONOSUPPORT;
+ ext_requested &= ext_supported;
speed = link_ksettings->base.speed;
ethtool2ptys_adver_func = ext_requested ?
mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
- err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+ mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
mlx5_toggle_port_link(mdev);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 2fd425a7b156..ffc4a36551c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2161,6 +2161,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
return true;
}
+struct ip_ttl_word {
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+};
+
+struct ipv6_hoplimit_word {
+ __be16 payload_len;
+ __u8 nexthdr;
+ __u8 hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+ u32 mask, offset;
+ u8 htype;
+
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+ mask = ~act->mangle.mask;
+ /* For IPv4 and IPv6 headers, check the 4-byte word
+ * to determine whether fields other than ttl and
+ * hop_limit are being modified.
+ */
+ if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+ struct ip_ttl_word *ttl_word =
+ (struct ip_ttl_word *)&mask;
+
+ if (offset != offsetof(struct iphdr, ttl) ||
+ ttl_word->protocol ||
+ ttl_word->check) {
+ return true;
+ }
+ } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ struct ipv6_hoplimit_word *hoplimit_word =
+ (struct ipv6_hoplimit_word *)&mask;
+
+ if (offset != offsetof(struct ipv6hdr, payload_len) ||
+ hoplimit_word->payload_len ||
+ hoplimit_word->nexthdr) {
+ return true;
+ }
+ }
+ return false;
+}
+
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
u32 actions,
@@ -2168,9 +2214,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
{
const struct flow_action_entry *act;
bool modify_ip_header;
- u8 htype, ip_proto;
void *headers_v;
u16 ethertype;
+ u8 ip_proto;
int i;
if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2190,9 +2236,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
act->id != FLOW_ACTION_ADD)
continue;
- htype = act->mangle.htype;
- if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
- htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ if (is_action_keys_supported(act)) {
modify_ip_header = true;
break;
}
@@ -2381,15 +2425,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return 0;
}
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
- struct ip_tunnel_key *b)
+struct encap_key {
+ struct ip_tunnel_key *ip_tun_key;
+ int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+ struct encap_key *b)
{
- return memcmp(a, b, sizeof(*a));
+ return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+ a->tunnel_type != b->tunnel_type;
}
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
{
- return jhash(key, sizeof(*key), 0);
+ return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+ key->tunnel_type);
}
@@ -2420,7 +2471,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct ip_tunnel_info *tun_info;
- struct ip_tunnel_key *key;
+ struct encap_key key, e_key;
struct mlx5e_encap_entry *e;
unsigned short family;
uintptr_t hash_key;
@@ -2430,13 +2481,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
tun_info = &parse_attr->tun_info[out_index];
family = ip_tunnel_info_af(tun_info);
- key = &tun_info->key;
+ key.ip_tun_key = &tun_info->key;
+ key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
- hash_key = hash_encap_info(key);
+ hash_key = hash_encap_info(&key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
- if (!cmp_encap_info(&e->tun_info.key, key)) {
+ e_key.ip_tun_key = &e->tun_info.key;
+ e_key.tunnel_type = e->tunnel_type;
+ if (!cmp_encap_info(&e_key, &key)) {
found = true;
break;
}
@@ -2717,7 +2771,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+ err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
parse_attr, hdrs, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ecd2c747f726..8a67fd197b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
int err;
+ memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
err = esw_create_legacy_vepa_table(esw);
if (err)
return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
/* Star rule to forward all traffic to uplink vport */
memset(spec, 0, sizeof(*spec));
+ memset(&dest, 0, sizeof(dest));
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = MLX5_VPORT_UPLINK;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 6c72f33f6d09..fe770cd2151c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1609,6 +1609,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
+ memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
err = esw_create_offloads_fdb_tables(esw, nvports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 5cf5f2a9d51f..8de64e88c670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
void *cmd;
int ret;
+ rcu_read_lock();
+ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+ rcu_read_unlock();
+
+ if (!flow) {
+ WARN_ONCE(1, "Received NULL pointer for handle\n");
+ return -EINVAL;
+ }
+
buf = kzalloc(size, GFP_ATOMIC);
if (!buf)
return -ENOMEM;
cmd = (buf + 1);
- rcu_read_lock();
- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
- rcu_read_unlock();
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
buf->complete = mlx_tls_kfree_complete;
ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+ if (ret < 0)
+ kfree(buf);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 70cc906a102b..76716419370d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
.size = 8,
.limit = 4
},
- .mr_cache[16] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[17] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[18] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[19] = {
- .size = 4,
- .limit = 2
- },
- .mr_cache[20] = {
- .size = 4,
- .limit = 2
- },
},
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 6e2a6caec3fb..c56e31d9f8a4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
tmp_push_vlan_tci =
FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
- NFP_FL_PUSH_VLAN_CFI;
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4fcaf11ed56e..0ed51e79db00 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,7 +26,7 @@
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
@@ -82,7 +82,6 @@
#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e03c8ef2c28c..9b8b843d0340 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
- if (match.key->vlan_id || match.key->vlan_priority) {
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.key->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.key->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- ext->tci = cpu_to_be16(tmp_tci);
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.mask->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.mask->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- msk->tci = cpu_to_be16(tmp_tci);
- }
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.key->vlan_id);
+ ext->tci = cpu_to_be16(tmp_tci);
+
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.mask->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.mask->vlan_id);
+ msk->tci = cpu_to_be16(tmp_tci);
}
}
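
Renaming the CFI bit to a "present" bit and setting it unconditionally lets the firmware match a VLAN tag whose VID and priority are both zero; the old guard on vlan_id/vlan_priority skipped populating the TCI for that case, so such a match could not be expressed. The bit layout itself is unchanged, as a small stand-alone packing example shows:

	#include <stdio.h>
	#include <stdint.h>

	#define VLAN_PRESENT	(1u << 12)		/* bit 12 of the TCI word */
	#define VLAN_PRIO(p)	(((p) & 0x7u) << 13)
	#define VLAN_VID(v)	((v) & 0xfffu)

	int main(void)
	{
		/* VID 0, priority 0: without the present bit the key would be
		 * 0x0000; with it the match key is still distinguishable.
		 */
		uint16_t tci = VLAN_PRESENT | VLAN_PRIO(0) | VLAN_VID(0);

		printf("tci = 0x%04x\n", tci);		/* prints 0x1000 */
		return 0;
	}
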
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index c3ad083d36c6..08e9bfa95f9b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = dev_queue_xmit(skb);
nfp_repr_inc_tx_stats(netdev, len, ret);
- return ret;
+ return NETDEV_TX_OK;
}
static int nfp_repr_stop(struct net_device *netdev)
@@ -382,7 +382,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
- netdev->priv_flags |= IFF_NO_QUEUE;
+ netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
netdev->features |= NETIF_F_LLTX;
if (nfp_app_has_tc(app)) {
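
An ndo_start_xmit handler is expected to return a netdev_tx_t (NETDEV_TX_OK or NETDEV_TX_BUSY), while dev_queue_xmit() returns NET_XMIT_* style codes; the representor already folds that verdict into its stats via nfp_repr_inc_tx_stats(), so the right contract is to report NETDEV_TX_OK, since the skb has been handed off (or freed) either way. A hedged sketch of the shape, with hypothetical helpers:

	/* Illustrative only: an xmit that forwards to a lower device. */
	static netdev_tx_t example_repr_xmit(struct sk_buff *skb,
					     struct net_device *netdev)
	{
		unsigned int len = skb->len;
		int ret;

		skb->dev = example_lower_dev(netdev);	/* hypothetical helper */
		ret = dev_queue_xmit(skb);
		example_inc_tx_stats(netdev, len, ret);	/* hypothetical helper */

		return NETDEV_TX_OK;	/* never leak NET_XMIT_* upward */
	}
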
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a8ca26c2ae0c..88eb9e05d2a1 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5460,7 +5460,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
tp->cp_cmd |= PktCntrDisable | INTT_1;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_W16(tp, IntrMitigate, 0x5151);
+ RTL_W16(tp, IntrMitigate, 0x5100);
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356a7e73..3dfb07a78952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
/* Specific functions used for Ring mode */
/* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+ int bfsize)
{
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
- << ERDES1_BUFFER2_SIZE_SHIFT)
- & ERDES1_BUFFER2_SIZE_MASK);
+ if (bfsize == BUF_SIZE_16KiB)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+ << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK);
if (end)
p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
}
/* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
- << RDES1_BUFFER2_SIZE_SHIFT)
- & RDES1_BUFFER2_SIZE_MASK);
+ if (bfsize >= BUF_SIZE_2KiB) {
+ int bfsize2;
+
+ bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK);
+ }
if (end)
p->des1 |= cpu_to_le32(RDES1_END_RING);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7fbb6a4dbf51..e061e9f5fad7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ exit:
}
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwmac4_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fdec997..98fa471da7c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
}
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwxgmac2_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a790f9d..5202d6ad7919 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
- !!(rdes0 & RDES0_FRAME_TYPE),
- !!(rdes0 & ERDES0_RX_MAC_ADDR));
+ if (likely(ret == good_frame))
+ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+ !!(rdes0 & RDES0_FRAME_TYPE),
+ !!(rdes0 & ERDES0_RX_MAC_ADDR));
if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+ p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
else
- ehn_desc_rx_set_on_ring(p, end);
+ ehn_desc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944f26e3..5bb00234d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
- int end);
+ int end, int bfsize);
/* DMA TX descriptor ring initialization */
void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
/* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb29feba..b7dd4e3c760d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- pr_warn("%s: Oversized frame spanned multiple buffers\n",
- __func__);
stats->rx_length_errors++;
return discard_frame;
}
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
- int end)
+ int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);
else
- ndesc_rx_set_on_ring(p, end);
+ ndesc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6a2e1031a62a..a26e36dbb5df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
}
/**
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
- unsigned int entry = rx_q->cur_rx;
+ unsigned int next_entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
- unsigned int next_entry;
unsigned int count = 0;
bool xmac;
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- int status;
+ int entry, status;
struct dma_desc *p;
struct dma_desc *np;
+ entry = next_entry;
+
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
- netdev_err(priv->dev,
- "len %d larger than size (%d)\n",
- frame_len, priv->dma_buf_sz);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "len %d larger than size (%d)\n",
+ frame_len, priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
- break;
+ continue;
}
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
dev_warn(priv->device,
"packet dropped\n");
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
} else {
skb = rx_q->rx_skbuff[entry];
if (unlikely(!skb)) {
- netdev_err(priv->dev,
- "%s: Inconsistent Rx chain\n",
- priv->dev->name);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "%s: Inconsistent Rx chain\n",
+ priv->dev->name);
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
prefetch(skb->data - NET_IP_ALIGN);
rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
}
- entry = next_entry;
}
stmmac_rx_refill(priv, queue);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e859ae2e42d5..49f41b64077b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -987,6 +987,7 @@ struct netvsc_device {
wait_queue_head_t wait_drain;
bool destroy;
+ bool tx_disable; /* if true, do not wake up queue again */
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9a022539d305..fdbeb7070d42 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
+ net_device->tx_disable = false;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
} else {
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
- if (netif_tx_queue_stopped(txq) &&
+ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
(hv_get_avail_to_write_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(txq);
ndev_ctx->eth_stats.stop_queue++;
- if (atomic_read(&nvchan->queue_sends) < 1) {
+ if (atomic_read(&nvchan->queue_sends) < 1 &&
+ !net_device->tx_disable) {
netif_tx_wake_queue(txq);
ndev_ctx->eth_stats.wake_queue++;
ret = -ENOSPC;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1a08679f90ce..06393b215102 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
rcu_read_unlock();
}
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ nvscdev->tx_disable = false;
+ virt_wmb(); /* ensure queue wake up mechanism is on */
+
+ netif_tx_wake_all_queues(ndev);
+}
+
static int netvsc_open(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
rdev = nvdev->extension;
if (!rdev->link_state) {
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(nvdev, net);
}
if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
}
}
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ if (nvscdev) {
+ nvscdev->tx_disable = true;
+ virt_wmb(); /* ensure txq will not wake up after stop */
+ }
+
+ netif_tx_disable(ndev);
+}
+
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
int ret;
- netif_tx_disable(net);
+ netvsc_tx_disable(nvdev, net);
/* No need to close rndis filter if it is removed already */
if (!nvdev)
@@ -918,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev,
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
- netif_tx_disable(ndev);
+ netvsc_tx_disable(nvdev, ndev);
ret = rndis_filter_close(nvdev);
if (ret) {
@@ -1906,7 +1926,7 @@ static void netvsc_link_change(struct work_struct *w)
if (rdev->link_state) {
rdev->link_state = false;
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(net_device, net);
} else {
notify = true;
}
@@ -1916,7 +1936,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
}
kfree(event);
break;
@@ -1925,7 +1945,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add(&event->list, &ndev_ctx->reconfig_events);
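
The new tx_disable flag closes a window where the completion path or netvsc_send_pkt() could wake a queue that netvsc_close()/netvsc_detach() had just stopped: the writer flips the flag and issues virt_wmb() before touching the queues, and the wake-up sites re-check the flag before calling netif_tx_wake_queue(). A hedged sketch of the gate, with a hypothetical device structure:

	/* Illustrative only. */
	struct example_dev {
		bool tx_disable;	/* when true, never wake queues */
	};

	static void example_tx_disable(struct example_dev *nv, struct net_device *ndev)
	{
		nv->tx_disable = true;
		virt_wmb();		/* flag visible before queues are stopped */
		netif_tx_disable(ndev);
	}

	static void example_tx_complete(struct example_dev *nv, struct netdev_queue *txq)
	{
		if (netif_tx_queue_stopped(txq) && !nv->tx_disable &&
		    example_ring_has_room(nv))	/* hypothetical helper */
			netif_tx_wake_queue(txq);
	}
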
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbdb4b15..9195f3476b1d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7c1430ed0244..6d1a1abbed27 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,7 @@ static void vrf_setup(struct net_device *dev)
/* default to no qdisc; user can add if desired */
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_RX_HANDLER;
dev->min_mtu = 0;
dev->max_mtu = 0;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2839bb70badf..f0716f6ce41f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
struct nvme_ns *ns)
{
- enum nvme_ana_state old;
-
mutex_lock(&ns->head->lock);
- old = ns->ana_state;
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
- if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+ if (nvme_state_is_live(ns->ana_state))
nvme_mpath_set_live(ns);
mutex_unlock(&ns->head->lock);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index e7e08889865e..68c49dd67210 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return ret;
}
-static inline void nvme_tcp_end_request(struct request *rq, __le16 status)
+static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
union nvme_result res = {};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2d73b66e3686..b3e765a95af8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
ret = nvmet_p2pmem_ns_enable(ns);
if (ret)
- goto out_unlock;
+ goto out_dev_disable;
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ out_unlock:
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
-
+out_dev_disable:
nvmet_ns_dev_disable(ns);
goto out_unlock;
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 3e43212d3c1c..bc6ebb51b0bf 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -75,11 +75,11 @@ err:
return ret;
}
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
- bv->bv_page = sg_page_iter_page(iter);
- bv->bv_offset = iter->sg->offset;
- bv->bv_len = PAGE_SIZE - iter->sg->offset;
+ bv->bv_page = sg_page(sg);
+ bv->bv_offset = sg->offset;
+ bv->bv_len = sg->length;
}
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
- struct sg_page_iter sg_pg_iter;
+ ssize_t nr_bvec = req->sg_cnt;
unsigned long bv_cnt = 0;
bool is_sync = false;
size_t len = 0, total_len = 0;
ssize_t ret = 0;
loff_t pos;
-
+ int i;
+ struct scatterlist *sg;
if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
}
memset(&req->f.iocb, 0, sizeof(struct kiocb));
- for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
- nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
len += req->f.bvec[bv_cnt].bv_len;
total_len += req->f.bvec[bv_cnt].bv_len;
bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
- ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+ ssize_t nr_bvec = req->sg_cnt;
if (!req->sg_cnt || !nr_bvec) {
nvmet_req_complete(req, 0);
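Switching from per-page iteration to per-segment iteration means one bio_vec now covers a whole scatterlist element (which may span more than one page), so the bvec count is simply req->sg_cnt. A minimal sketch of the for_each_sg() pattern used above:

#include <linux/scatterlist.h>
#include <linux/bvec.h>

/* Build one bio_vec per scatterlist element. */
static void fill_bvecs(struct scatterlist *sgl, int nents, struct bio_vec *bvs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		bvs[i].bv_page   = sg_page(sg);
		bvs[i].bv_offset = sg->offset;
		bvs[i].bv_len    = sg->length;
	}
}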
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 224d88634115..d994839a3e24 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
+void pcie_report_downtraining(struct pci_dev *dev);
/* Single Root I/O Virtualization */
struct pci_sriov {
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index d2eae3b7cc0f..4fa9e3523ee1 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
{
u16 lnk_ctl;
+ pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+
pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
@@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
}
-static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
{
struct pcie_device *srv = context;
struct pci_dev *port = srv->port;
- struct pci_dev *dev;
u16 link_status, events;
int ret;
@@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
if (ret != PCIBIOS_SUCCESSFUL || !events)
return IRQ_NONE;
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+ pcie_update_link_speed(port->subordinate, link_status);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+{
+ struct pcie_device *srv = context;
+ struct pci_dev *port = srv->port;
+ struct pci_dev *dev;
+
/*
* Print status from downstream devices, not this root port or
* downstream switch port.
*/
down_read(&pci_bus_sem);
list_for_each_entry(dev, &port->subordinate->devices, bus_list)
- __pcie_print_link_status(dev, false);
+ pcie_report_downtraining(dev);
up_read(&pci_bus_sem);
- pcie_update_link_speed(port->subordinate, link_status);
- pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
return IRQ_HANDLED;
}
@@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
if (!pcie_link_bandwidth_notification_supported(srv->port))
return -ENODEV;
- ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler,
+ ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
+ pcie_bw_notification_handler,
IRQF_SHARED, "PCIe BW notif", srv);
if (ret)
return ret;
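The rework splits the bandwidth-notification handler into a hard-IRQ part that acknowledges the event and returns IRQ_WAKE_THREAD, and a threaded part that may sleep (it takes pci_bus_sem while walking the bus). The general request_threaded_irq() pattern, sketched with hypothetical helpers:

#include <linux/interrupt.h>

static bool demo_event_pending(void *data) { return true; }	/* placeholder */
static void demo_ack_event(void *data) { }			/* placeholder */

static irqreturn_t demo_hardirq(int irq, void *data)
{
	/* quick, non-sleeping work: check and clear the event source */
	if (!demo_event_pending(data))
		return IRQ_NONE;
	demo_ack_event(data);
	return IRQ_WAKE_THREAD;		/* defer the rest to the thread */
}

static irqreturn_t demo_thread(int irq, void *data)
{
	/* may sleep: take semaphores, walk device lists, print, ... */
	return IRQ_HANDLED;
}

static int demo_setup(int irq, void *data)
{
	return request_threaded_irq(irq, demo_hardirq, demo_thread,
				    IRQF_SHARED, "demo", data);
}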
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2ec0df04e0dc..7e12d0163863 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
return dev;
}
-static void pcie_report_downtraining(struct pci_dev *dev)
+void pcie_report_downtraining(struct pci_dev *dev)
{
if (!pci_is_pcie(dev))
return;
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 5163097b43df..4bbd9ede38c8 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
int new_mode;
- if (phy->index != 0)
+ if (phy->index != 0) {
+ if (mode == PHY_MODE_USB_HOST)
+ return 0;
return -EINVAL;
+ }
switch (mode) {
case PHY_MODE_USB_HOST:
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4159c63a5fd2..a835b31aad99 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -24,6 +24,7 @@
#include <asm/crw.h>
#include <asm/isc.h>
#include <asm/ebcdic.h>
+#include <asm/ap.h>
#include "css.h"
#include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
" failed (rc=%d).\n", ret);
}
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+ CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+ if (sei_area->rs != 5)
+ return;
+
+ ap_bus_cfg_chg();
+}
+
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
case 2: /* i/o resource accessibility */
chsc_process_sei_res_acc(sei_area);
break;
+ case 3: /* ap config changed */
+ chsc_process_sei_ap_cfg_chg(sei_area);
+ break;
case 7: /* channel-path-availability information */
chsc_process_sei_chp_avail(sei_area);
break;
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a10cec0e86eb..0b3b9de45c60 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
struct irb *irb;
+ bool is_final;
private = container_of(work, struct vfio_ccw_private, io_work);
irb = &private->irb;
+ is_final = !(scsw_actl(&irb->scsw) &
+ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
- cp_free(&private->cp);
+ if (is_final)
+ cp_free(&private->cp);
}
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
- if (private->mdev)
+ if (private->mdev && is_final)
private->state = VFIO_CCW_STATE_IDLE;
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e15816ff1265..1546389d71db 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = ap_dev->drv;
+ /* prepare ap queue device removal */
if (is_queue_dev(dev))
- ap_queue_remove(to_ap_queue(dev));
+ ap_queue_prepare_remove(to_ap_queue(dev));
+
+ /* driver's chance to clean up gracefully */
if (ap_drv->remove)
ap_drv->remove(ap_dev);
+ /* now do the ap queue device remove */
+ if (is_queue_dev(dev))
+ ap_queue_remove(to_ap_queue(dev));
+
/* Remove queue/card from list of active queues/cards */
spin_lock_bh(&ap_list_lock);
if (is_card_dev(dev))
@@ -861,6 +868,16 @@ void ap_bus_force_rescan(void)
EXPORT_SYMBOL(ap_bus_force_rescan);
/*
+* A config change has happened, force an ap bus rescan.
+*/
+void ap_bus_cfg_chg(void)
+{
+ AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+ ap_bus_force_rescan();
+}
+
+/*
* hex2bitmap() - parse hex mask string and set bitmap.
* Valid strings are "0x012345678" with at least one valid hex number.
* Rest of the bitmap to the right is padded with 0. No spaces allowed
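Splitting removal into ap_queue_prepare_remove() and ap_queue_remove() lets the bus flush the queue and fence it (AP_STATE_REMOVE) before the driver's remove callback runs, and only reset it to AP_STATE_UNBOUND afterwards. A hedged sketch of how an enqueue path can honour such a fence; the real ap_queue_message() is not part of this diff:

#include <linux/spinlock.h>
#include <linux/errno.h>

enum example_state { EX_WORKING, EX_REMOVE, EX_UNBOUND };

struct example_queue {
	spinlock_t lock;
	enum example_state state;
};

/* Once the queue is fenced for removal, refuse new work instead of
 * queueing it behind a teardown in progress. */
static int example_queue_message(struct example_queue *q)
{
	int rc;

	spin_lock_bh(&q->lock);
	rc = (q->state == EX_WORKING) ? 0 : -ENODEV;
	spin_unlock_bh(&q->lock);
	return rc;
}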
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d0059eae5d94..15a98a673c5c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,6 +91,7 @@ enum ap_state {
AP_STATE_WORKING,
AP_STATE_QUEUE_FULL,
AP_STATE_SUSPEND_WAIT,
+ AP_STATE_REMOVE, /* about to be removed from driver */
AP_STATE_UNBOUND, /* momentary not bound to a driver */
AP_STATE_BORKED, /* broken */
NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba261210c6da..6a340f2c3556 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
+ [AP_STATE_REMOVE] = {
+ [AP_EVENT_POLL] = ap_sm_nop,
+ [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ },
[AP_STATE_UNBOUND] = {
[AP_EVENT_POLL] = ap_sm_nop,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
}
EXPORT_SYMBOL(ap_flush_queue);
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
{
- ap_flush_queue(aq);
+ spin_lock_bh(&aq->lock);
+ /* flush queue */
+ __ap_flush_queue(aq);
+ /* set REMOVE state to prevent new messages from being queued */
+ aq->state = AP_STATE_REMOVE;
del_timer_sync(&aq->timeout);
+ spin_unlock_bh(&aq->lock);
+}
- /* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+ /*
+ * all messages have been flushed and the state is
+ * AP_STATE_REMOVE. Now reset with zero which also
+ * clears the irq registration and move the state
+ * to AP_STATE_UNBOUND to signal that this queue
+ * is not used by any driver currently.
+ */
spin_lock_bh(&aq->lock);
ap_zapq(aq->qid);
aq->state = AP_STATE_UNBOUND;
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_remove);
void ap_queue_reinit_state(struct ap_queue *aq)
{
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_reinit_state);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index eb93c2d27d0a..689c2af7026a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module **pmod,
unsigned int weight)
{
if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
atomic_add(weight, &zc->load);
atomic_add(weight, &zq->load);
zq->request_count++;
+ *pmod = zq->queue->ap_dev.drv->driver.owner;
return zq;
}
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module *mod,
unsigned int weight)
{
- struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
zq->request_count--;
atomic_sub(weight, &zc->load);
atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
unsigned int weight, pref_weight;
unsigned int func_code;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
unsigned int func_code;
unsigned short *domain;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
unsigned int func_code;
struct ap_message ap_msg;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
struct ap_message ap_msg;
unsigned int domain;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
pref_weight = weight;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
spin_unlock(&zcrypt_list_lock);
out:
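zcrypt_pick_queue() now records which module owner it pinned while zcrypt_list_lock was held, and zcrypt_drop_queue() puts exactly that reference instead of re-reading the driver pointer at drop time, when the queue's binding may already have changed. The underlying pin/unpin pairing, sketched:

#include <linux/module.h>

/* Pin a module while work is outstanding, remember which module was
 * pinned, and drop that same reference when the work finishes. */
static struct module *pin_owner(struct module *owner)
{
	if (!try_module_get(owner))
		return NULL;	/* owner is unloading, do not use it */
	return owner;		/* caller keeps this for the later put */
}

static void unpin_owner(struct module *owner)
{
	if (owner)
		module_put(owner);
}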
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 744a64680d5b..e8fc28dba8df 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
add_timer(&erp_action->timer);
}
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag)
+{
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
+ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+ read_unlock(&adapter->port_list_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
int clear, char *dbftag)
{
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
int lun_status;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL)
+ continue;
if (zsdev->port != port)
continue;
/* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3fce47b0b21b..c6acca521ffe 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
char *dbftag);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index db00b5e3abbe..33eddb02ee30 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
list_for_each_entry(port, &adapter->port_list, list) {
if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
zfcp_fc_test_link(port);
- if (!port->d_id)
- zfcp_erp_port_reopen(port,
- ZFCP_STATUS_COMMON_ERP_FAILED,
- "fcrscn1");
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
struct fc_els_rscn *head;
struct fc_els_rscn_page *page;
u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
no_entries = be16_to_cpu(head->rscn_plen) /
sizeof(struct fc_els_rscn_page);
+ if (no_entries > 1) {
+ /* handle failed ports */
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list) {
+ if (port->d_id)
+ continue;
+ zfcp_erp_port_reopen(port,
+ ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fcrscn1");
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ }
+
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
page++;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f4f6a07c5222..221d0dfb8493 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret = SUCCESS, fc_ret;
+ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+ zfcp_erp_wait(adapter);
+ }
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
fc_ret = fc_block_scsi_eh(scpnt);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 1df5171594b8..11fb68d7e60d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
return capacity;
}
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+ return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
static inline int aac_adapter_check_health(struct aac_dev *dev)
{
- if (unlikely(pci_channel_offline(dev->pdev)))
+ if (unlikely(aac_pci_offline(dev)))
return -1;
return (dev)->a_ops.adapter_check_health(dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e67e032936ef..78430a7b294c 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
return -ETIMEDOUT;
}
- if (unlikely(pci_channel_offline(dev->pdev)))
+ if (unlikely(aac_pci_offline(dev)))
return -EFAULT;
if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
spin_unlock_irqrestore(&fibptr->event_lock, flags);
- if (unlikely(pci_channel_offline(dev->pdev)))
+ if (unlikely(aac_pci_offline(dev)))
return -EFAULT;
fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index dbaa4f131433..3ad997ac3510 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -139,6 +139,7 @@ static const struct {
{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+ { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
if (rsp->flags & FCP_RSP_LEN_VALID)
rsp_code = rsp->data.info.rsp_code;
- scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+ scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
"flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
- cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+ cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
- rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
if (crq->format == IBMVFC_PARTITION_MIGRATED) {
/* We need to re-setup the interpartition connection */
- dev_info(vhost->dev, "Re-enabling adapter\n");
+ dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
vhost->client_migrated = 1;
ibmvfc_purge_requests(vhost, DID_REQUEUE);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
- } else {
- dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+ } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+ dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
ibmvfc_purge_requests(vhost, DID_ERROR);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+ } else {
+ dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
}
return;
case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error, status);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
break;
}
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
- ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
- ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+ ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
break;
}
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
- mad->iu.status, mad->iu.error,
+ be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
- rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
- rsp->fc_explain, status);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+ ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+ status);
break;
}
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
level += ibmvfc_retry_host_init(vhost);
ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
- rsp->status, rsp->error);
+ be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
ibmvfc_free_event(evt);
return;
case IBMVFC_MAD_CRQ_ERROR:
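The logging changes above convert the big-endian response fields before printing; handing a raw __be16 to a %x conversion prints the byte-swapped value on little-endian hosts and trips sparse. A one-line reminder of the pattern:

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/printk.h>

static void log_status(__be16 status, __be16 error)
{
	/* convert wire-order fields before formatting them */
	pr_err("command failed (%x:%x)\n",
	       be16_to_cpu(status), be16_to_cpu(error));
}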
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index b81a53c4a9a8..459cc288ba1d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
IBMVFC_CRQ_XPORT_EVENT = 0xFF,
};
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
IBMVFC_CRQ_INIT = 0x01,
IBMVFC_CRQ_INIT_COMPLETE = 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+ IBMVFC_PARTNER_FAILED = 0x01,
+ IBMVFC_PARTNER_DEREGISTER = 0x02,
IBMVFC_PARTITION_MIGRATED = 0x06,
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e57774472e75..1d8c584ec1e9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
if (smid < ioc->hi_priority_smid) {
struct scsiio_tracker *st;
+ void *request;
st = _get_st_from_smid(ioc, smid);
if (!st) {
_base_recovery_check(ioc);
return;
}
+
+ /* Clear MPI request frame */
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(request, 0, ioc->request_sz);
+
mpt3sas_base_clear_st(ioc, st);
_base_recovery_check(ioc);
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8bb5b8f9f4d2..1ccfbc7eebe0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *scmd = NULL;
struct scsiio_tracker *st;
+ Mpi25SCSIIORequest_t *mpi_request;
if (smid > 0 &&
smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
u32 unique_tag = smid - 1;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ /*
+ * If SCSI IO request is outstanding at driver level then
+ * DevHandle field must be non-zero. If DevHandle is zero
+ * then it means that this smid is free at driver level,
+ * so return NULL.
+ */
+ if (!mpi_request->DevHandle)
+ return scmd;
+
scmd = scsi_host_find_tag(ioc->shost, unique_tag);
if (scmd) {
st = scsi_cmd_priv(scmd);
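Together with the mpt3sas_base.c hunk above, which zeroes the MPI request frame when an smid is freed, a zero DevHandle becomes a reliable marker that the smid is free at driver level, so the lookup can bail out before trusting the block-layer tag. The clear-on-free / check-on-lookup pairing in generic form:

#include <linux/types.h>
#include <linux/string.h>

struct example_slot { u32 handle; };

static void example_slot_free(struct example_slot *s)
{
	memset(s, 0, sizeof(*s));	/* mirrors clearing the request frame */
}

static bool example_slot_in_use(const struct example_slot *s)
{
	return s->handle != 0;		/* mirrors the DevHandle != 0 test */
}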
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 16a18d5d856f..6e4f4931ae17 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6a9040faed00..3b119ca0cc0c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, state);
+ /*
+ * If the device state changes to SDEV_RUNNING, we need to run
+ * the queue to avoid I/O hang.
+ */
+ if (ret == 0 && state == SDEV_RUNNING)
+ blk_mq_run_hw_queues(sdev->request_queue, true);
mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 251db30d0882..2b2bc4b49d78 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
- /*
- * XXX and what if there are packets in flight and this close()
- * XXX is followed by a "rmmod sd_mod"?
- */
-
scsi_disk_put(sdkp);
}
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
unsigned int opt_xfer_bytes =
logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ if (sdkp->opt_xfer_blocks == 0)
+ return false;
+
if (sdkp->opt_xfer_blocks > dev_max) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
-
+ struct request_queue *q = disk->queue;
+
ida_free(&sd_index_ida, sdkp->index);
+ /*
+ * Wait until all requests that are in progress have completed.
+ * This is necessary to avoid that e.g. scsi_end_request() crashes
+ * due to clearing the disk->private_data pointer. Wait from inside
+ * scsi_disk_release() instead of from sd_release() to avoid that
+ * freezing and unfreezing the request queue affects user space I/O
+ * in case multiple processes open a /dev/sd... node concurrently.
+ */
+ blk_mq_freeze_queue(q);
+ blk_mq_unfreeze_queue(q);
+
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
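The freeze/unfreeze pair acts as a drain barrier: blk_mq_freeze_queue() waits until every request already in flight on the queue has completed, and unfreezing right away keeps the queue usable while guaranteeing nothing still references the disk private data that is cleared next. The pattern in isolation:

#include <linux/blk-mq.h>

/* Wait for all in-flight requests on q to finish before tearing down
 * data those requests might still reference. */
static void example_drain(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* blocks until in-flight requests complete */
	blk_mq_unfreeze_queue(q);	/* allow new I/O again */
}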
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
index 9351349cf0a9..1e0041ec8132 100644
--- a/drivers/soc/bcm/bcm2835-power.c
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -150,7 +150,12 @@ struct bcm2835_power {
static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
{
- u64 start = ktime_get_ns();
+ u64 start;
+
+ if (!reg)
+ return 0;
+
+ start = ktime_get_ns();
/* Enable the module's async AXI bridges. */
ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
{
- u64 start = ktime_get_ns();
+ u64 start;
+
+ if (!reg)
+ return 0;
+
+ start = ktime_get_ns();
/* Enable the module's async AXI bridges. */
ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
}
}
-static void
+static int
bcm2835_init_power_domain(struct bcm2835_power *power,
int pd_xlate_index, const char *name)
{
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
dom->clk = devm_clk_get(dev->parent, name);
+ if (IS_ERR(dom->clk)) {
+ int ret = PTR_ERR(dom->clk);
+
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /* Some domains don't have a clk, so make sure that we
+ * don't deref an error pointer later.
+ */
+ dom->clk = NULL;
+ }
dom->base.name = name;
dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
pm_genpd_init(&dom->base, NULL, true);
power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+
+ return 0;
}
/** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
};
- int ret, i;
+ int ret = 0, i;
u32 id;
power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
- for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
- bcm2835_init_power_domain(power, i, power_domain_names[i]);
+ for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+ ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
+ if (ret)
+ goto fail;
+ }
for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
ret = devm_reset_controller_register(dev, &power->reset);
if (ret)
- return ret;
+ goto fail;
of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
dev_info(dev, "Broadcom BCM2835 power domains driver");
return 0;
+
+fail:
+ for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+ struct generic_pm_domain *dom = &power->domains[i].base;
+
+ if (dom->name)
+ pm_genpd_remove(dom);
+ }
+ return ret;
}
static int bcm2835_power_remove(struct platform_device *pdev)
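The probe path now propagates -EPROBE_DEFER from devm_clk_get() and treats any other error as "this domain simply has no clock", storing NULL so later clk calls become harmless no-ops. On kernels that provide it, devm_clk_get_optional() expresses the same intent directly; a hedged sketch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* devm_clk_get_optional() returns NULL when the clock is simply absent and
 * an ERR_PTR (including -EPROBE_DEFER) on real errors, which the caller
 * should propagate. */
static int example_get_domain_clk(struct device *dev, const char *name,
				  struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* includes -EPROBE_DEFER */
	*out = clk;			/* may be NULL: clk_* APIs accept that */
	return 0;
}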
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index c0901b96cfe4..62951e836cbc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig"
source "drivers/staging/mt7621-mmc/Kconfig"
-source "drivers/staging/mt7621-eth/Kconfig"
-
source "drivers/staging/mt7621-dts/Kconfig"
source "drivers/staging/gasket/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 57c6bce13ff4..d1b17ddcd354 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621) += mt7621-spi/
obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_MTK_MMC) += mt7621-mmc/
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index 687537203d9c..d9725888af6f 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -3,6 +3,7 @@
#
config XIL_AXIS_FIFO
tristate "Xilinx AXI-Stream FIFO IP core driver"
+ depends on OF
default n
help
This adds support for the Xilinx AXI-Stream
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index a7d569cfca5d..0dff1ac057cd 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
unsigned int mask);
unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
unsigned int comedi_nscans_left(struct comedi_subdevice *s,
unsigned int nscans);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index eefa62f42c0f..5a32b8fc000e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
EXPORT_SYMBOL_GPL(comedi_dio_update_state);
/**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
+ * bytes
* @s: COMEDI subdevice.
+ * @cmd: COMEDI command.
*
* Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
+ * number of channels in the scan for the specified command.
*
* For digital input, output or input/output subdevices, samples for
* multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
*
* Returns the overall scan length in bytes.
*/
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
- struct comedi_cmd *cmd = &s->async->cmd;
unsigned int num_samples;
unsigned int bits_per_sample;
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
}
return comedi_samples_to_bytes(s, num_samples);
}
+EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+
+/**
+ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * @s: COMEDI subdevice.
+ *
+ * Determines the overall scan length according to the subdevice type and the
+ * number of channels in the scan for the current command.
+ *
+ * For digital input, output or input/output subdevices, samples for
+ * multiple channels are assumed to be packed into one or more unsigned
+ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
+ * flag. For other types of subdevice, samples are assumed to occupy a
+ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
+ *
+ * Returns the overall scan length in bytes.
+ */
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ return comedi_bytes_per_scan_cmd(s, cmd);
+}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5edf59ac6706..b04dad8c7092 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
struct ni_private *devpriv = dev->private;
+ unsigned int bytes_per_scan;
int err = 0;
/* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
- err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
- s->async->prealloc_bufsz /
- comedi_bytes_per_scan(s));
+ bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
+ if (bytes_per_scan) {
+ err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+ s->async->prealloc_bufsz /
+ bytes_per_scan);
+ }
if (err)
return 3;
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 829f7b12e0dc..9bbc68729c11 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
[EROFS_FT_SYMLINK] = DT_LNK,
};
+static void debug_one_dentry(unsigned char d_type, const char *de_name,
+ unsigned int de_namelen)
+{
+#ifdef CONFIG_EROFS_FS_DEBUG
+ /* since the on-disk name could not have the trailing '\0' */
+ unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
+
+ memcpy(dbg_namebuf, de_name, de_namelen);
+ dbg_namebuf[de_namelen] = '\0';
+
+ debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
+ de_namelen, d_type);
+#endif
+}
+
static int erofs_fill_dentries(struct dir_context *ctx,
void *dentry_blk, unsigned int *ofs,
unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
de = dentry_blk + *ofs;
while (de < end) {
const char *de_name;
- int de_namelen;
+ unsigned int de_namelen;
unsigned char d_type;
-#ifdef CONFIG_EROFS_FS_DEBUG
- unsigned int dbg_namelen;
- unsigned char dbg_namebuf[EROFS_NAME_LEN];
-#endif
- if (unlikely(de->file_type < EROFS_FT_MAX))
+ if (de->file_type < EROFS_FT_MAX)
d_type = erofs_filetype_table[de->file_type];
else
d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
nameoff = le16_to_cpu(de->nameoff);
de_name = (char *)dentry_blk + nameoff;
- de_namelen = unlikely(de + 1 >= end) ?
- /* last directory entry */
- strnlen(de_name, maxsize - nameoff) :
- le16_to_cpu(de[1].nameoff) - nameoff;
+ /* the last dirent in the block? */
+ if (de + 1 >= end)
+ de_namelen = strnlen(de_name, maxsize - nameoff);
+ else
+ de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
/* a corrupted entry is found */
- if (unlikely(de_namelen < 0)) {
+ if (unlikely(nameoff + de_namelen > maxsize ||
+ de_namelen > EROFS_NAME_LEN)) {
DBG_BUGON(1);
return -EIO;
}
-#ifdef CONFIG_EROFS_FS_DEBUG
- dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
- memcpy(dbg_namebuf, de_name, dbg_namelen);
- dbg_namebuf[dbg_namelen] = '\0';
-
- debugln("%s, found de_name %s de_len %d d_type %d", __func__,
- dbg_namebuf, de_namelen, d_type);
-#endif
-
+ debug_one_dentry(d_type, de_name, de_namelen);
if (!dir_emit(ctx, de_name, de_namelen,
le64_to_cpu(de->nid), d_type))
/* stopped by some reason */
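With de_namelen now unsigned, the old "namelen < 0" corruption test is meaningless; the rewritten check instead verifies that the non-NUL-terminated on-disk name stays inside the block being parsed and within EROFS_NAME_LEN. The validation in generic form:

#include <linux/types.h>

#define EXAMPLE_NAME_MAX 255	/* stand-in for EROFS_NAME_LEN */

/* Validate an on-disk name that carries no trailing NUL. */
static bool example_name_is_sane(unsigned int nameoff, unsigned int namelen,
				 unsigned int blocksize)
{
	return namelen <= EXAMPLE_NAME_MAX &&
	       nameoff <= blocksize &&
	       namelen <= blocksize - nameoff;	/* stays inside the block */
}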
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 8715bc50e09c..31eef8395774 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -972,6 +972,7 @@ repeat:
overlapped = false;
compressed_pages = grp->compressed_pages;
+ err = 0;
for (i = 0; i < clusterpages; ++i) {
unsigned int pagenr;
@@ -981,26 +982,39 @@ repeat:
DBG_BUGON(!page);
DBG_BUGON(!page->mapping);
- if (z_erofs_is_stagingpage(page))
- continue;
+ if (!z_erofs_is_stagingpage(page)) {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (page->mapping == MNGD_MAPPING(sbi)) {
- DBG_BUGON(!PageUptodate(page));
- continue;
- }
+ if (page->mapping == MNGD_MAPPING(sbi)) {
+ if (unlikely(!PageUptodate(page)))
+ err = -EIO;
+ continue;
+ }
#endif
- /* only non-head page could be reused as a compressed page */
- pagenr = z_erofs_onlinepage_index(page);
+ /*
+ * only if non-head page can be selected
+ * for inplace decompression
+ */
+ pagenr = z_erofs_onlinepage_index(page);
- DBG_BUGON(pagenr >= nr_pages);
- DBG_BUGON(pages[pagenr]);
- ++sparsemem_pages;
- pages[pagenr] = page;
+ DBG_BUGON(pagenr >= nr_pages);
+ DBG_BUGON(pages[pagenr]);
+ ++sparsemem_pages;
+ pages[pagenr] = page;
- overlapped = true;
+ overlapped = true;
+ }
+
+ /* PG_error needs checking for inplaced and staging pages */
+ if (unlikely(PageError(page))) {
+ DBG_BUGON(PageUptodate(page));
+ err = -EIO;
+ }
}
+ if (unlikely(err))
+ goto out;
+
llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ repeat:
skip_allocpage:
vout = erofs_vmap(pages, nr_pages);
+ if (!vout) {
+ err = -ENOMEM;
+ goto out;
+ }
err = z_erofs_vle_unzip_vmap(compressed_pages,
clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ repeat:
if (page->mapping == mc) {
WRITE_ONCE(grp->compressed_pages[nr], page);
+ ClearPageError(page);
if (!PagePrivate(page)) {
/*
* impossible to be !PagePrivate(page) for
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 48b263a2731a..0daac9b984a8 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
- if (clusterpages == 1)
+ if (clusterpages == 1) {
vin = kmap_atomic(compressed_pages[0]);
- else
+ } else {
vin = erofs_vmap(compressed_pages, clusterpages);
+ if (!vin)
+ return -ENOMEM;
+ }
preempt_disable();
vout = erofs_pcpubuf[smp_processor_id()].data;
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index b73385540216..250c15ace2a7 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -117,22 +117,6 @@
status = "okay";
};
-&ethernet {
- //mtd-mac-address = <&factory 0xe000>;
- gmac1: mac@0 {
- compatible = "mediatek,eth-mac";
- reg = <0>;
- phy-handle = <&phy1>;
- };
-
- mdio-bus {
- phy1: ethernet-phy@1 {
- reg = <1>;
- phy-mode = "rgmii";
- };
- };
-};
-
&pinctrl {
state_default: pinctrl0 {
gpio {
@@ -141,3 +125,16 @@
};
};
};
+
+&switch0 {
+ ports {
+ port@0 {
+ label = "ethblack";
+ status = "ok";
+ };
+ port@4 {
+ label = "ethblue";
+ status = "ok";
+ };
+ };
+};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 6aff3680ce4b..17020e24abd2 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -372,16 +372,83 @@
mediatek,ethsys = <&ethsys>;
- mediatek,switch = <&gsw>;
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ gmac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ reg = <1>;
+ status = "off";
+ phy-mode = "rgmii";
+ phy-handle = <&phy5>;
+ };
mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
- phy1f: ethernet-phy@1f {
- reg = <0x1f>;
+ phy5: ethernet-phy@5 {
+ reg = <5>;
phy-mode = "rgmii";
};
+
+ switch0: switch0@0 {
+ compatible = "mediatek,mt7621";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ mediatek,mcm;
+ resets = <&rstctrl 2>;
+ reset-names = "mcm";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ port@0 {
+ status = "off";
+ reg = <0>;
+ label = "lan0";
+ };
+ port@1 {
+ status = "off";
+ reg = <1>;
+ label = "lan1";
+ };
+ port@2 {
+ status = "off";
+ reg = <2>;
+ label = "lan2";
+ };
+ port@3 {
+ status = "off";
+ reg = <3>;
+ label = "lan3";
+ };
+ port@4 {
+ status = "off";
+ reg = <4>;
+ label = "lan4";
+ };
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "trgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
};
};
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644
index 596b38552697..000000000000
--- a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Mediatek Gigabit Switch
-=======================
-
-The mediatek gigabit switch can be found on Mediatek SoCs.
-
-Required properties:
-- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
- "mediatek,mt7623-gsw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the gigabit switches interrupt
-
-
-Additional required properties for ARM based SoCs:
-- mediatek,reset-pin: phandle describing the reset GPIO
-- clocks: the clocks used by the switch
-- clock-names: the names of the clocks listed in the clocks property
- these should be "trgpll", "esw", "gp2", "gp1"
-- mt7530-supply: the phandle of the regulator used to power the switch
-- mediatek,pctl-regmap: phandle to the port control regmap. this is used to
- setup the drive current
-
-
-Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
-
-Example:
-
-gsw: switch@1b100000 {
- compatible = "mediatek,mt7623-gsw";
- reg = <0 0x1b110000 0 0x300000>;
-
- interrupt-parent = <&pio>;
- interrupts = <168 IRQ_TYPE_EDGE_RISING>;
-
- clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
- <&ethsys CLK_ETHSYS_ESW>,
- <&ethsys CLK_ETHSYS_GP2>,
- <&ethsys CLK_ETHSYS_GP1>;
- clock-names = "trgpll", "esw", "gp2", "gp1";
-
- mt7530-supply = <&mt6323_vpa_reg>;
-
- mediatek,pctl-regmap = <&syscfg_pctl_a>;
- mediatek,reset-pin = <&pio 15 0>;
-
- status = "okay";
-};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644
index 44ea86c7a96c..000000000000
--- a/drivers/staging/mt7621-eth/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-config NET_VENDOR_MEDIATEK_STAGING
- bool "MediaTek ethernet driver - staging version"
- depends on RALINK
- ---help---
- If you have an MT7621 Mediatek SoC with ethernet, say Y.
-
-if NET_VENDOR_MEDIATEK_STAGING
-choice
- prompt "MAC type"
-
-config NET_MEDIATEK_MT7621
- bool "MT7621"
- depends on MIPS && SOC_MT7621
-
-endchoice
-
-config NET_MEDIATEK_SOC_STAGING
- tristate "MediaTek SoC Gigabit Ethernet support"
- depends on NET_VENDOR_MEDIATEK_STAGING
- select PHYLIB
- ---help---
- This driver supports the gigabit ethernet MACs in the
- MediaTek SoC family.
-
-config NET_MEDIATEK_MDIO
- def_bool NET_MEDIATEK_SOC_STAGING
- depends on NET_MEDIATEK_MT7621
- select PHYLIB
-
-config NET_MEDIATEK_MDIO_MT7620
- def_bool NET_MEDIATEK_SOC_STAGING
- depends on NET_MEDIATEK_MT7621
- select NET_MEDIATEK_MDIO
-
-config NET_MEDIATEK_GSW_MT7621
- def_tristate NET_MEDIATEK_SOC_STAGING
- depends on NET_MEDIATEK_MT7621
-
-endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644
index 018bcc3596b3..000000000000
--- a/drivers/staging/mt7621-eth/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for the Ralink SoCs built-in ethernet macs
-#
-
-mtk-eth-soc-y += mtk_eth_soc.o ethtool.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO) += mdio.o
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621) += soc_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621) += gsw_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644
index f9e47d4b4cd4..000000000000
--- a/drivers/staging/mt7621-eth/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-
-- verify devicetree documentation is consistent with code
-- fix ethtool - currently doesn't return valid data.
-- general code review and clean up
-- add support for second MAC on mt7621
-- convert gsw code to use switchdev interfaces
-- md7620_mmi_write etc should probably be wrapped
- in a regmap abstraction.
-- Get soc_mt7621 to work with QDMA TX if possible.
-- Ensure phys are correctly configured when a cable
- is plugged in.
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644
index 8c4228e2c987..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include "mtk_eth_soc.h"
-#include "ethtool.h"
-
-struct mtk_stat {
- char name[ETH_GSTRING_LEN];
- unsigned int idx;
-};
-
-#define MTK_HW_STAT(stat) { \
- .name = #stat, \
- .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
-}
-
-static const struct mtk_stat mtk_ethtool_hw_stats[] = {
- MTK_HW_STAT(tx_bytes),
- MTK_HW_STAT(tx_packets),
- MTK_HW_STAT(tx_skip),
- MTK_HW_STAT(tx_collisions),
- MTK_HW_STAT(rx_bytes),
- MTK_HW_STAT(rx_packets),
- MTK_HW_STAT(rx_overflow),
- MTK_HW_STAT(rx_fcs_errors),
- MTK_HW_STAT(rx_short_errors),
- MTK_HW_STAT(rx_long_errors),
- MTK_HW_STAT(rx_checksum_errors),
- MTK_HW_STAT(rx_flow_control_packets),
-};
-
-#define MTK_HW_STATS_LEN ARRAY_SIZE(mtk_ethtool_hw_stats)
-
-static int mtk_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
-
- if (!mac->phy_dev)
- return -ENODEV;
-
- if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
- err = phy_read_status(mac->phy_dev);
- if (err)
- return -ENODEV;
- }
-
- phy_ethtool_ksettings_get(mac->phy_dev, cmd);
- return 0;
-}
-
-static int mtk_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if (!mac->phy_dev)
- return -ENODEV;
-
- if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
- if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
- mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
- mac->phy_flags = MTK_PHY_FLAG_PORT;
- } else if (mac->hw->mii_bus) {
- mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
- cmd->base.phy_address);
- if (!mac->phy_dev)
- return -ENODEV;
- mac->phy_flags = MTK_PHY_FLAG_ATTACH;
- } else {
- return -ENODEV;
- }
- }
-
- return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
-}
-
-static void mtk_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_soc_data *soc = mac->hw->soc;
-
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
-
- if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
- info->n_stats = MTK_HW_STATS_LEN;
-}
-
-static u32 mtk_get_msglevel(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- return mac->hw->msg_enable;
-}
-
-static void mtk_set_msglevel(struct net_device *dev, u32 value)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- mac->hw->msg_enable = value;
-}
-
-static int mtk_nway_reset(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if (!mac->phy_dev)
- return -EOPNOTSUPP;
-
- return genphy_restart_aneg(mac->phy_dev);
-}
-
-static u32 mtk_get_link(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
-
- if (!mac->phy_dev)
- goto out_get_link;
-
- if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
- err = genphy_update_link(mac->phy_dev);
- if (err)
- goto out_get_link;
- }
-
- return mac->phy_dev->link;
-
-out_get_link:
- return ethtool_op_get_link(dev);
-}
-
-static int mtk_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if ((ring->tx_pending < 2) ||
- (ring->rx_pending < 2) ||
- (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
- (ring->tx_pending > mac->hw->soc->dma_ring_size))
- return -EINVAL;
-
- dev->netdev_ops->ndo_stop(dev);
-
- mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
- mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
-
- return dev->netdev_ops->ndo_open(dev);
-}
-
-static void mtk_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- ring->rx_max_pending = mac->hw->soc->dma_ring_size;
- ring->tx_max_pending = mac->hw->soc->dma_ring_size;
- ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
- ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
-}
-
-static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
- int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < MTK_HW_STATS_LEN; i++) {
- memcpy(data, mtk_ethtool_hw_stats[i].name,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int mtk_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return MTK_HW_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void mtk_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_hw_stats *hwstats = mac->hw_stats;
- unsigned int start;
- int i;
-
- if (netif_running(dev) && netif_device_present(dev)) {
- if (spin_trylock(&hwstats->stats_lock)) {
- mtk_stats_update_mac(mac);
- spin_unlock(&hwstats->stats_lock);
- }
- }
-
- do {
- start = u64_stats_fetch_begin_irq(&hwstats->syncp);
- for (i = 0; i < MTK_HW_STATS_LEN; i++)
- data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
-
- } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
-}
-
-static struct ethtool_ops mtk_ethtool_ops = {
- .get_link_ksettings = mtk_get_link_ksettings,
- .set_link_ksettings = mtk_set_link_ksettings,
- .get_drvinfo = mtk_get_drvinfo,
- .get_msglevel = mtk_get_msglevel,
- .set_msglevel = mtk_set_msglevel,
- .nway_reset = mtk_nway_reset,
- .get_link = mtk_get_link,
- .set_ringparam = mtk_set_ringparam,
- .get_ringparam = mtk_get_ringparam,
-};
-
-void mtk_set_ethtool_ops(struct net_device *netdev)
-{
- struct mtk_mac *mac = netdev_priv(netdev);
- struct mtk_soc_data *soc = mac->hw->soc;
-
- if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
- mtk_ethtool_ops.get_strings = mtk_get_strings;
- mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
- mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
- }
-
- netdev->ethtool_ops = &mtk_ethtool_ops;
-}
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644
index 0071469aea6c..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETHTOOL_H
-#define MTK_ETHTOOL_H
-
-#include <linux/ethtool.h>
-
-void mtk_set_ethtool_ops(struct net_device *netdev);
-
-#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644
index 70f7e5481952..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7620.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_GSW_MT7620_H__
-#define _RALINK_GSW_MT7620_H__
-
-#define GSW_REG_PHY_TIMEOUT (5 * HZ)
-
-#define MT7620_GSW_REG_PIAC 0x0004
-
-#define GSW_NUM_VLANS 16
-#define GSW_NUM_VIDS 4096
-#define GSW_NUM_PORTS 7
-#define GSW_PORT6 6
-
-#define GSW_MDIO_ACCESS BIT(31)
-#define GSW_MDIO_READ BIT(19)
-#define GSW_MDIO_WRITE BIT(18)
-#define GSW_MDIO_START BIT(16)
-#define GSW_MDIO_ADDR_SHIFT 20
-#define GSW_MDIO_REG_SHIFT 25
-
-#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100))
-#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100))
-#define GSW_REG_SMACCR0 0x3fE4
-#define GSW_REG_SMACCR1 0x3fE8
-#define GSW_REG_CKGCR 0x3ff0
-
-#define GSW_REG_IMR 0x7008
-#define GSW_REG_ISR 0x700c
-#define GSW_REG_GPC1 0x7014
-
-#define SYSC_REG_CHIP_REV_ID 0x0c
-#define SYSC_REG_CFG 0x10
-#define SYSC_REG_CFG1 0x14
-#define RST_CTRL_MCM BIT(2)
-#define SYSC_PAD_RGMII2_MDIO 0x58
-#define SYSC_GPIO_MODE 0x60
-
-#define PORT_IRQ_ST_CHG 0x7f
-
-#define MT7621_ESW_PHY_POLLING 0x0000
-#define MT7620_ESW_PHY_POLLING 0x7000
-
-#define PMCR_IPG BIT(18)
-#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
-#define PMCR_BACKOFF BIT(9)
-#define PMCR_BACKPRES BIT(8)
-#define PMCR_RX_FC BIT(5)
-#define PMCR_TX_FC BIT(4)
-#define PMCR_SPEED(_x) (_x << 2)
-#define PMCR_DUPLEX BIT(1)
-#define PMCR_LINK BIT(0)
-
-#define PHY_AN_EN BIT(31)
-#define PHY_PRE_EN BIT(30)
-#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24)
-
-/* ethernet subsystem config register */
-#define ETHSYS_SYSCFG0 0x14
-/* ethernet subsystem clock register */
-#define ETHSYS_CLKCFG0 0x2c
-#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
-
-/* p5 RGMII wrapper TX clock control register */
-#define MT7530_P5RGMIITXCR 0x7b04
-/* p5 RGMII wrapper RX clock control register */
-#define MT7530_P5RGMIIRXCR 0x7b00
-/* TRGMII TDX ODT registers */
-#define MT7530_TRGMII_TD0_ODT 0x7a54
-#define MT7530_TRGMII_TD1_ODT 0x7a5c
-#define MT7530_TRGMII_TD2_ODT 0x7a64
-#define MT7530_TRGMII_TD3_ODT 0x7a6c
-#define MT7530_TRGMII_TD4_ODT 0x7a74
-#define MT7530_TRGMII_TD5_ODT 0x7a7c
-/* TRGMII TCK ctrl register */
-#define MT7530_TRGMII_TCK_CTRL 0x7a78
-/* TRGMII Tx ctrl register */
-#define MT7530_TRGMII_TXCTRL 0x7a40
-/* port 6 extended control register */
-#define MT7530_P6ECR 0x7830
-/* IO driver control register */
-#define MT7530_IO_DRV_CR 0x7810
-/* top signal control register */
-#define MT7530_TOP_SIG_CTRL 0x7808
-/* modified hwtrap register */
-#define MT7530_MHWTRAP 0x7804
-/* hwtrap status register */
-#define MT7530_HWTRAP 0x7800
-/* interrupt status register */
-#define MT7530_SYS_INT_STS 0x700c
-/* system interrupt enable register */
-#define MT7530_SYS_INT_EN 0x7008
-/* system control register */
-#define MT7530_SYS_CTRL 0x7000
-/* port MAC status register */
-#define MT7530_PMSR_P(x) (0x3008 + (x * 0x100))
-/* port MAC control register */
-#define MT7530_PMCR_P(x) (0x3000 + (x * 0x100))
-
-#define MT7621_XTAL_SHIFT 6
-#define MT7621_XTAL_MASK 0x7
-#define MT7621_XTAL_25 6
-#define MT7621_XTAL_40 3
-#define MT7621_MDIO_DRV_MASK (3 << 4)
-#define MT7621_GE1_MODE_MASK (3 << 12)
-
-#define TRGMII_TXCTRL_TXC_INV BIT(30)
-#define P6ECR_INTF_MODE_RGMII BIT(1)
-#define P5RGMIIRXCR_C_ALIGN BIT(8)
-#define P5RGMIIRXCR_DELAY_2 BIT(1)
-#define P5RGMIITXCR_DELAY_2 (BIT(8) | BIT(2))
-
-/* TOP_SIG_CTRL bits */
-#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
-
-/* MHWTRAP bits */
-#define MHWTRAP_MANUAL BIT(16)
-#define MHWTRAP_P5_MAC_SEL BIT(13)
-#define MHWTRAP_P6_DIS BIT(8)
-#define MHWTRAP_P5_RGMII_MODE BIT(7)
-#define MHWTRAP_P5_DIS BIT(6)
-#define MHWTRAP_PHY_ACCESS BIT(5)
-
-/* HWTRAP bits */
-#define HWTRAP_XTAL_SHIFT 9
-#define HWTRAP_XTAL_MASK 0x3
-
-/* SYS_CTRL bits */
-#define SYS_CTRL_SW_RST BIT(1)
-#define SYS_CTRL_REG_RST BIT(0)
-
-/* PMCR bits */
-#define PMCR_IFG_XMIT_96 BIT(18)
-#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE_MODE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
-#define PMCR_BACK_PRES_EN BIT(9)
-#define PMCR_BACKOFF_EN BIT(8)
-#define PMCR_TX_FC_EN BIT(5)
-#define PMCR_RX_FC_EN BIT(4)
-#define PMCR_FORCE_SPEED_1000 BIT(3)
-#define PMCR_FORCE_FDX BIT(1)
-#define PMCR_FORCE_LNK BIT(0)
-#define PMCR_FIXED_LINK (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
- PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
- PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
- PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
- PMCR_FORCE_LNK)
-
-#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN)
-
-/* TRGMII control registers */
-#define GSW_INTF_MODE 0x390
-#define GSW_TRGMII_TD0_ODT 0x354
-#define GSW_TRGMII_TD1_ODT 0x35c
-#define GSW_TRGMII_TD2_ODT 0x364
-#define GSW_TRGMII_TD3_ODT 0x36c
-#define GSW_TRGMII_TXCTL_ODT 0x374
-#define GSW_TRGMII_TCK_ODT 0x37c
-#define GSW_TRGMII_RCK_CTRL 0x300
-
-#define INTF_MODE_TRGMII BIT(1)
-#define TRGMII_RCK_CTRL_RX_RST BIT(31)
-
-/* Mac control registers */
-#define MTK_MAC_P2_MCR 0x200
-#define MTK_MAC_P1_MCR 0x100
-
-#define MAC_MCR_MAX_RX_2K BIT(29)
-#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
-#define MAC_MCR_FORCE_MODE BIT(15)
-#define MAC_MCR_TX_EN BIT(14)
-#define MAC_MCR_RX_EN BIT(13)
-#define MAC_MCR_BACKOFF_EN BIT(9)
-#define MAC_MCR_BACKPR_EN BIT(8)
-#define MAC_MCR_FORCE_RX_FC BIT(5)
-#define MAC_MCR_FORCE_TX_FC BIT(4)
-#define MAC_MCR_SPEED_1000 BIT(3)
-#define MAC_MCR_FORCE_DPX BIT(1)
-#define MAC_MCR_FORCE_LINK BIT(0)
-#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
- MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
- MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
- MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
- MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
- MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
-#define MAC_MCR_FIXED_LINK_FC (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
- MAC_MCR_FIXED_LINK)
-
-/* possible XTAL speed */
-#define MT7623_XTAL_40 0
-#define MT7623_XTAL_20 1
-#define MT7623_XTAL_25 3
-
-/* GPIO port control registers */
-#define GPIO_OD33_CTRL8 0x4c0
-#define GPIO_BIAS_CTRL 0xed0
-#define GPIO_DRV_SEL10 0xf00
-
-/* on MT7620 the function of port 4 can be configured in software */
-enum {
- PORT4_EPHY = 0,
- PORT4_EXT,
-};
-
-/* struct mt7620_gsw - the structure that holds the SoC specific data
- * @dev: The Device struct
- * @base: The base address
- * @piac_offset: The PIAC base may change depending on SoC
- * @irq: The IRQ we are using
- * @port4: The port4 mode on MT7620
- * @autopoll: Is MDIO autopolling enabled
- * @ethsys: The ethsys register map
- * @pctl: The pin control register map
- * @clk_gsw: The switch clock
- * @clk_gp1: The gmac1 clock
- * @clk_gp2: The gmac2 clock
- * @clk_trgpll: The trgmii pll clock
- */
-struct mt7620_gsw {
- struct device *dev;
- void __iomem *base;
- u32 piac_offset;
- int irq;
- int port4;
- unsigned long int autopoll;
-
- struct regmap *ethsys;
- struct regmap *pctl;
-
- struct clk *clk_gsw;
- struct clk *clk_gp1;
- struct clk *clk_gp2;
- struct clk *clk_trgpll;
-};
-
-/* switch register I/O wrappers */
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
-
-/* the callback used by the driver core to bring up the switch */
-int mtk_gsw_init(struct mtk_eth *eth);
-
-/* MDIO access wrappers */
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
-int mt7620_has_carrier(struct mtk_eth *eth);
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
- int speed, int duplex);
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
- u32 phy_register, u32 write_data);
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
-void mt7620_handle_carrier(struct mtk_eth *eth);
-
-#endif
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644
index 53767b17bad9..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7621.c
+++ /dev/null
@@ -1,297 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-
-#include <ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
-{
- iowrite32(val, gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_w32);
-
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
-{
- return ioread32(gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_r32);
-
-static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
-{
- struct mtk_eth *eth = (struct mtk_eth *)_eth;
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
- u32 reg, i;
-
- reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
-
- for (i = 0; i < 5; i++) {
- unsigned int link;
-
- if ((reg & BIT(i)) == 0)
- continue;
-
- link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
-
- if (link == eth->link[i])
- continue;
-
- eth->link[i] = link;
- if (link)
- netdev_info(*eth->netdev,
- "port %d link up\n", i);
- else
- netdev_info(*eth->netdev,
- "port %d link down\n", i);
- }
-
- mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
-
- return IRQ_HANDLED;
-}
-
-static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
- struct device_node *np)
-{
- u32 i;
- u32 val;
-
- /* hardware reset the switch */
- mtk_reset(eth, RST_CTRL_MCM);
- mdelay(10);
-
- /* reduce RGMII2 PAD driving strength */
- rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);
-
- /* gpio mux - RGMII1=Normal mode */
- rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);
-
- /* set GMAC1 RGMII mode */
- rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);
-
- /* enable MDIO to control MT7530 */
- rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);
-
- /* turn off all PHYs */
- for (i = 0; i <= 4; i++) {
- val = _mt7620_mii_read(gsw, i, 0x0);
- val |= BIT(11);
- _mt7620_mii_write(gsw, i, 0x0, val);
- }
-
- /* reset the switch */
- mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
- SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
- usleep_range(10, 20);
-
- if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
- /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
- mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
- mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
- } else {
- /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
- mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
- mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
- }
-
- /* GE2, Link down */
- mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);
-
- /* Enable Port 6, P5 as GMAC5, P5 disable */
- val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
- /* Enable Port 6 */
- val &= ~MHWTRAP_P6_DIS;
- /* Disable Port 5 */
- val |= MHWTRAP_P5_DIS;
- /* manual override of HW-Trap */
- val |= MHWTRAP_MANUAL;
- mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);
-
- val = rt_sysc_r32(SYSC_REG_CFG);
- val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
- if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
-		/* 40 MHz */
-
- /* disable MT7530 core clock */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x410);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x0);
-
- /* disable MT7530 PLL */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x40d);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x2020);
-
-		/* for MT7530 core clock = 500 MHz */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x40e);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x119);
-
- /* enable MT7530 PLL */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x40d);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- _mt7620_mii_write(gsw, 0, 14, 0x2820);
-
- usleep_range(20, 40);
-
- /* enable MT7530 core clock */
- _mt7620_mii_write(gsw, 0, 13, 0x1f);
- _mt7620_mii_write(gsw, 0, 14, 0x410);
- _mt7620_mii_write(gsw, 0, 13, 0x401f);
- }
-
- /* RGMII */
- _mt7620_mii_write(gsw, 0, 14, 0x1);
-
- /* set MT7530 central align */
- mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
- mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
- MT7530_TRGMII_TXCTRL);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);
-
- /* delay setting for 10/1000M */
- mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
- P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
- mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);
-
-	/* lower TX driving */
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
- mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);
-
- /* turn on all PHYs */
- for (i = 0; i <= 4; i++) {
- val = _mt7620_mii_read(gsw, i, 0);
- val &= ~BIT(11);
- _mt7620_mii_write(gsw, i, 0, val);
- }
-
-#define MT7530_NUM_PORTS 8
-#define REG_ESW_PORT_PCR(x) (0x2004 | ((x) << 8))
-#define REG_ESW_PORT_PVC(x) (0x2010 | ((x) << 8))
-#define REG_ESW_PORT_PPBV1(x) (0x2014 | ((x) << 8))
-#define MT7530_CPU_PORT 6
-
-	/* This is copied from mt7530_apply_config in the libreCMC driver */
- {
- int i;
-
- for (i = 0; i < MT7530_NUM_PORTS; i++)
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
-
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
- 0x00ff0000);
-
- for (i = 0; i < MT7530_NUM_PORTS; i++)
- mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
- }
-
- /* enable irq */
- mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
- mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
-}
-
-static const struct of_device_id mediatek_gsw_match[] = {
- { .compatible = "mediatek,mt7621-gsw" },
- {},
-};
-MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
-
-int mtk_gsw_init(struct mtk_eth *eth)
-{
- struct device_node *np = eth->switch_np;
- struct platform_device *pdev = of_find_device_by_node(np);
- struct mt7620_gsw *gsw;
-
- if (!pdev)
- return -ENODEV;
-
- if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
- return -EINVAL;
-
- gsw = platform_get_drvdata(pdev);
- eth->sw_priv = gsw;
-
- if (!gsw->irq)
- return -EINVAL;
-
- request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
- "gsw", eth);
- disable_irq(gsw->irq);
-
- mt7621_hw_init(eth, gsw, np);
-
- enable_irq(gsw->irq);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mtk_gsw_init);
-
-static int mt7621_gsw_probe(struct platform_device *pdev)
-{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct mt7620_gsw *gsw;
-
- gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
- if (!gsw)
- return -ENOMEM;
-
- gsw->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(gsw->base))
- return PTR_ERR(gsw->base);
-
- gsw->dev = &pdev->dev;
- gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-
- platform_set_drvdata(pdev, gsw);
-
- return 0;
-}
-
-static int mt7621_gsw_remove(struct platform_device *pdev)
-{
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static struct platform_driver gsw_driver = {
- .probe = mt7621_gsw_probe,
- .remove = mt7621_gsw_remove,
- .driver = {
- .name = "mt7621-gsw",
- .of_match_table = mediatek_gsw_match,
- },
-};
-
-module_platform_driver(gsw_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644
index 5fea6a447eed..000000000000
--- a/drivers/staging/mt7621-eth/mdio.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/phy.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-
-static int mtk_mdio_reset(struct mii_bus *bus)
-{
- /* TODO */
- return 0;
-}
-
-static void mtk_phy_link_adjust(struct net_device *dev)
-{
- struct mtk_eth *eth = netdev_priv(dev);
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&eth->phy->lock, flags);
- for (i = 0; i < 8; i++) {
- if (eth->phy->phy_node[i]) {
- struct phy_device *phydev = eth->phy->phy[i];
- int status_change = 0;
-
- if (phydev->link)
- if (eth->phy->duplex[i] != phydev->duplex ||
- eth->phy->speed[i] != phydev->speed)
- status_change = 1;
-
- if (phydev->link != eth->link[i])
- status_change = 1;
-
- switch (phydev->speed) {
- case SPEED_1000:
- case SPEED_100:
- case SPEED_10:
- eth->link[i] = phydev->link;
- eth->phy->duplex[i] = phydev->duplex;
- eth->phy->speed[i] = phydev->speed;
-
- if (status_change &&
- eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- break;
- }
- }
- }
- spin_unlock_irqrestore(&eth->phy->lock, flags);
-}
-
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *phy_node)
-{
- const __be32 *_port = NULL;
- struct phy_device *phydev;
- int phy_mode, port;
-
- _port = of_get_property(phy_node, "reg", NULL);
-
- if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
- pr_err("%pOFn: invalid port id\n", phy_node);
- return -EINVAL;
- }
- port = be32_to_cpu(*_port);
- phy_mode = of_get_phy_mode(phy_node);
- if (phy_mode < 0) {
- dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
- eth->phy->phy_node[port] = NULL;
- return -EINVAL;
- }
-
- phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
- mtk_phy_link_adjust, 0, phy_mode);
- if (!phydev) {
- dev_err(eth->dev, "could not connect to PHY\n");
- eth->phy->phy_node[port] = NULL;
- return -ENODEV;
- }
-
- phydev->supported &= PHY_1000BT_FEATURES;
- phydev->advertising = phydev->supported;
-
- dev_info(eth->dev,
- "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
- port, phydev_name(phydev), phydev->phy_id,
- phydev->drv->name);
-
- eth->phy->phy[port] = phydev;
- eth->link[port] = 0;
-
- return 0;
-}
-
-static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
- struct phy_device *phy)
-{
- phy_attach(eth->netdev[mac->id], phydev_name(phy),
- PHY_INTERFACE_MODE_MII);
-
- phy->autoneg = AUTONEG_ENABLE;
- phy->speed = 0;
- phy->duplex = 0;
- phy_set_max_speed(phy, SPEED_100);
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
-
- phy_start_aneg(phy);
-}
-
-static int mtk_phy_connect(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- int i;
-
- for (i = 0; i < 8; i++) {
- if (eth->phy->phy_node[i]) {
- if (!mac->phy_dev) {
- mac->phy_dev = eth->phy->phy[i];
- mac->phy_flags = MTK_PHY_FLAG_PORT;
- }
- } else if (eth->mii_bus) {
- struct phy_device *phy;
-
- phy = mdiobus_get_phy(eth->mii_bus, i);
- if (phy) {
- phy_init(eth, mac, phy);
- if (!mac->phy_dev) {
- mac->phy_dev = phy;
- mac->phy_flags = MTK_PHY_FLAG_ATTACH;
- }
- }
- }
- }
-
- return 0;
-}
-
-static void mtk_phy_disconnect(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- unsigned long flags;
- int i;
-
- for (i = 0; i < 8; i++)
- if (eth->phy->phy_fixed[i]) {
- spin_lock_irqsave(&eth->phy->lock, flags);
- eth->link[i] = 0;
- if (eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- spin_unlock_irqrestore(&eth->phy->lock, flags);
- } else if (eth->phy->phy[i]) {
- phy_disconnect(eth->phy->phy[i]);
- } else if (eth->mii_bus) {
- struct phy_device *phy =
- mdiobus_get_phy(eth->mii_bus, i);
-
- if (phy)
- phy_detach(phy);
- }
-}
-
-static void mtk_phy_start(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- unsigned long flags;
- int i;
-
- for (i = 0; i < 8; i++) {
- if (eth->phy->phy_fixed[i]) {
- spin_lock_irqsave(&eth->phy->lock, flags);
- eth->link[i] = 1;
- if (eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- spin_unlock_irqrestore(&eth->phy->lock, flags);
- } else if (eth->phy->phy[i]) {
- phy_start(eth->phy->phy[i]);
- }
- }
-}
-
-static void mtk_phy_stop(struct mtk_mac *mac)
-{
- struct mtk_eth *eth = mac->hw;
- unsigned long flags;
- int i;
-
- for (i = 0; i < 8; i++)
- if (eth->phy->phy_fixed[i]) {
- spin_lock_irqsave(&eth->phy->lock, flags);
- eth->link[i] = 0;
- if (eth->soc->mdio_adjust_link)
- eth->soc->mdio_adjust_link(eth, i);
- spin_unlock_irqrestore(&eth->phy->lock, flags);
- } else if (eth->phy->phy[i]) {
- phy_stop(eth->phy->phy[i]);
- }
-}
-
-static struct mtk_phy phy_ralink = {
- .connect = mtk_phy_connect,
- .disconnect = mtk_phy_disconnect,
- .start = mtk_phy_start,
- .stop = mtk_phy_stop,
-};
-
-int mtk_mdio_init(struct mtk_eth *eth)
-{
- struct device_node *mii_np;
- int err;
-
- if (!eth->soc->mdio_read || !eth->soc->mdio_write)
- return 0;
-
- spin_lock_init(&phy_ralink.lock);
- eth->phy = &phy_ralink;
-
- mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
- if (!mii_np) {
- dev_err(eth->dev, "no %s child node found", "mdio-bus");
- return -ENODEV;
- }
-
- if (!of_device_is_available(mii_np)) {
- err = 0;
- goto err_put_node;
- }
-
- eth->mii_bus = mdiobus_alloc();
- if (!eth->mii_bus) {
- err = -ENOMEM;
- goto err_put_node;
- }
-
- eth->mii_bus->name = "mdio";
- eth->mii_bus->read = eth->soc->mdio_read;
- eth->mii_bus->write = eth->soc->mdio_write;
- eth->mii_bus->reset = mtk_mdio_reset;
- eth->mii_bus->priv = eth;
- eth->mii_bus->parent = eth->dev;
-
- snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
- err = of_mdiobus_register(eth->mii_bus, mii_np);
- if (err)
- goto err_free_bus;
-
- return 0;
-
-err_free_bus:
- kfree(eth->mii_bus);
-err_put_node:
- of_node_put(mii_np);
- eth->mii_bus = NULL;
- return err;
-}
-
-void mtk_mdio_cleanup(struct mtk_eth *eth)
-{
- if (!eth->mii_bus)
- return;
-
- mdiobus_unregister(eth->mii_bus);
- of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
-}
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644
index b14e23842a01..000000000000
--- a/drivers/staging/mt7621-eth/mdio.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_MDIO_H__
-#define _RALINK_MDIO_H__
-
-#ifdef CONFIG_NET_MEDIATEK_MDIO
-int mtk_mdio_init(struct mtk_eth *eth);
-void mtk_mdio_cleanup(struct mtk_eth *eth);
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *phy_node);
-#else
-static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
-static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
-#endif
-#endif
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644
index ced605c2914e..000000000000
--- a/drivers/staging/mt7621-eth/mdio_mt7620.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
-{
- unsigned long t_start = jiffies;
-
- while (1) {
- if (!(mtk_switch_r32(gsw,
- gsw->piac_offset + MT7620_GSW_REG_PIAC) &
- GSW_MDIO_ACCESS))
- return 0;
- if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
- break;
- }
-
- dev_err(gsw->dev, "mdio: MDIO timeout\n");
- return -1;
-}
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
- u32 phy_register, u32 write_data)
-{
- if (mt7620_mii_busy_wait(gsw))
- return -1;
-
- write_data &= 0xffff;
-
- mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
- (phy_register << GSW_MDIO_REG_SHIFT) |
- (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
- MT7620_GSW_REG_PIAC);
-
- if (mt7620_mii_busy_wait(gsw))
- return -1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_write);
-
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
-{
- u32 d;
-
- if (mt7620_mii_busy_wait(gsw))
- return 0xffff;
-
- mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
- (phy_reg << GSW_MDIO_REG_SHIFT) |
- (phy_addr << GSW_MDIO_ADDR_SHIFT),
- MT7620_GSW_REG_PIAC);
-
- if (mt7620_mii_busy_wait(gsw))
- return 0xffff;
-
- d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
-
- return d;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_read);
-
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
-{
- struct mtk_eth *eth = bus->priv;
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
- return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
-}
-
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
-{
- struct mtk_eth *eth = bus->priv;
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
- return _mt7620_mii_read(gsw, phy_addr, phy_reg);
-}
-
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
-{
- _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
- _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
- _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
-
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
-{
- u16 high, low;
-
- _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
- low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
- high = _mt7620_mii_read(gsw, 0x1f, 0x10);
-
- return (high << 16) | (low & 0xffff);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
-
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
-{
- u32 val = mt7530_mdio_r32(gsw, reg);
-
- val &= ~mask;
- val |= set;
- mt7530_mdio_w32(gsw, reg, val);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
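
mt7530_mdio_w32()/mt7530_mdio_r32() above reach the switch's 32-bit register file indirectly over MDIO: bits [15:6] of the register offset select a page through pseudo-PHY 0x1f, register 0x1f, the low 16 data bits travel through register (reg >> 2) & 0xf, and the high 16 bits through register 0x10. A stand-alone sketch of just that address split; the example register is MT7530_MHWTRAP from gsw_mt7620.h and no hardware access is performed:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t reg = 0x7804;			/* MT7530_MHWTRAP */
		uint32_t page = (reg >> 6) & 0x3ff;	/* written to pseudo-PHY 0x1f, reg 0x1f */
		uint32_t lo_reg = (reg >> 2) & 0xf;	/* low 16 data bits go through this reg */
		uint32_t hi_reg = 0x10;			/* high 16 data bits go through this reg */

		printf("reg 0x%04x -> page 0x%03x, low word via reg 0x%x, high word via reg 0x%x\n",
		       reg, page, lo_reg, hi_reg);
		return 0;
	}
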
-
-static unsigned char *mtk_speed_str(int speed)
-{
- switch (speed) {
- case 2:
- case SPEED_1000:
- return "1000";
- case 1:
- case SPEED_100:
- return "100";
- case 0:
- case SPEED_10:
- return "10";
- }
-
- return "? ";
-}
-
-int mt7620_has_carrier(struct mtk_eth *eth)
-{
- struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
- int i;
-
- for (i = 0; i < GSW_PORT6; i++)
- if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
- return 1;
- return 0;
-}
-
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
- int speed, int duplex)
-{
- struct mt7620_gsw *gsw = eth->sw_priv;
-
- if (link)
- dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
- port, mtk_speed_str(speed),
- (duplex) ? "Full" : "Half");
- else
- dev_info(gsw->dev, "port %d link down\n", port);
-}
-
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
-{
- mt7620_print_link_state(eth, port, eth->link[port],
- eth->phy->speed[port],
- (eth->phy->duplex[port] == DUPLEX_FULL));
-}
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644
index 02a8584b3d1d..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ /dev/null
@@ -1,2178 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/clk.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/if_vlan.h>
-#include <linux/reset.h>
-#include <linux/tcp.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-#include <linux/regmap.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-#include "ethtool.h"
-
-#define MAX_RX_LENGTH 1536
-#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
-#define DMA_DUMMY_DESC 0xffffffff
-#define MTK_DEFAULT_MSG_ENABLE \
- (NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR)
-
-#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
-#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1))
-#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1))
-
-#define SYSC_REG_RSTCTRL 0x34
-
-static int mtk_msg_level = -1;
-module_param_named(msg_level, mtk_msg_level, int, 0);
-MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
-
-static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
- [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
- [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
- [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
- [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
- [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
- [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
- [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
- [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
- [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
- [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
- [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
- [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
- [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
- [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
- [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
- [MTK_REG_MTK_RST_GL] = MTK_RST_GL,
-};
-
-static const u16 *mtk_reg_table = mtk_reg_table_default;
-
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
-{
- __raw_writel(val, eth->base + reg);
-}
-
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
-{
- return __raw_readl(eth->base + reg);
-}
-
-static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
-{
- mtk_w32(eth, val, mtk_reg_table[reg]);
-}
-
-static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
-{
- return mtk_r32(eth, mtk_reg_table[reg]);
-}
-
-/* These bits are also exposed via the reset-controller API. However, the switch
- * and FE need to be brought out of reset at exactly the same moment, and the
- * reset-controller API does not provide this feature yet. Do the reset manually
- * until the reset-controller API is able to do this.
- */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
-{
- u32 val;
-
- regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
- val |= reset_bits;
- regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
- usleep_range(10, 20);
- val &= ~reset_bits;
- regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
- usleep_range(10, 20);
-}
-EXPORT_SYMBOL(mtk_reset);
-
-static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
-{
- if (eth->soc->dma_type & MTK_PDMA)
- mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
-}
-
-static inline u32 mtk_irq_pending(struct mtk_eth *eth)
-{
- u32 status = 0;
-
- if (eth->soc->dma_type & MTK_PDMA)
- status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
- if (eth->soc->dma_type & MTK_QDMA)
- status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
-
- return status;
-}
-
-static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
-{
- u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
- if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
- status_reg = MTK_REG_MTK_INT_STATUS2;
-
- mtk_reg_w32(eth, mask, status_reg);
-}
-
-static u32 mtk_irq_pending_status(struct mtk_eth *eth)
-{
- u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
- if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
- status_reg = MTK_REG_MTK_INT_STATUS2;
-
- return mtk_reg_r32(eth, status_reg);
-}
-
-static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
-{
- u32 val;
-
- if (eth->soc->dma_type & MTK_PDMA) {
- val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
- /* flush write */
- mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- }
- if (eth->soc->dma_type & MTK_QDMA) {
- val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
- /* flush write */
- mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- }
-}
-
-static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
-{
- u32 val;
-
- if (eth->soc->dma_type & MTK_PDMA) {
- val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
- /* flush write */
- mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- }
- if (eth->soc->dma_type & MTK_QDMA) {
- val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
- /* flush write */
- mtk_r32(eth, MTK_QMTK_INT_ENABLE);
- }
-}
-
-static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
-{
- u32 enabled = 0;
-
- if (eth->soc->dma_type & MTK_PDMA)
- enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
- if (eth->soc->dma_type & MTK_QDMA)
- enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-
- return enabled;
-}
-
-static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
- unsigned char *macaddr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mac->hw->page_lock, flags);
- mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
- mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
- (macaddr[4] << 8) | macaddr[5],
- MTK_GDMA1_MAC_ADRL);
- spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static int mtk_set_mac_address(struct net_device *dev, void *p)
-{
- int ret = eth_mac_addr(dev, p);
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- if (ret)
- return ret;
-
- if (eth->soc->set_mac)
- eth->soc->set_mac(mac, dev->dev_addr);
- else
- mtk_hw_set_macaddr(mac, p);
-
- return 0;
-}
-
-static inline int mtk_max_frag_size(int mtu)
-{
- /* make sure buf_size will be at least MAX_RX_LENGTH */
- if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
- mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
-
- return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
-static inline int mtk_max_buf_size(int frag_size)
-{
- int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
- WARN_ON(buf_size < MAX_RX_LENGTH);
-
- return buf_size;
-}
-
-static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
- struct mtk_rx_dma *dma_rxd)
-{
- rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
- rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
- rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
- rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
-}
-
-static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
- struct mtk_tx_dma *dma_txd)
-{
- WRITE_ONCE(dma_txd->txd1, txd->txd1);
- WRITE_ONCE(dma_txd->txd3, txd->txd3);
- WRITE_ONCE(dma_txd->txd4, txd->txd4);
- /* clean dma done flag last */
- WRITE_ONCE(dma_txd->txd2, txd->txd2);
-}
-
-static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
- int i;
-
- if (ring->rx_data && ring->rx_dma) {
- for (i = 0; i < ring->rx_ring_size; i++) {
- if (!ring->rx_data[i])
- continue;
- if (!ring->rx_dma[i].rxd1)
- continue;
- dma_unmap_single(eth->dev,
- ring->rx_dma[i].rxd1,
- ring->rx_buf_size,
- DMA_FROM_DEVICE);
- skb_free_frag(ring->rx_data[i]);
- }
- kfree(ring->rx_data);
- ring->rx_data = NULL;
- }
-
- if (ring->rx_dma) {
- dma_free_coherent(eth->dev,
- ring->rx_ring_size * sizeof(*ring->rx_dma),
- ring->rx_dma,
- ring->rx_phys);
- ring->rx_dma = NULL;
- }
-}
-
-static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
- int i, pad = 0;
-
- ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
- ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
- ring->rx_ring_size = eth->soc->dma_ring_size;
- ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
- GFP_KERNEL);
- if (!ring->rx_data)
- goto no_rx_mem;
-
- for (i = 0; i < ring->rx_ring_size; i++) {
- ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
- if (!ring->rx_data[i])
- goto no_rx_mem;
- }
-
- ring->rx_dma =
- dma_alloc_coherent(eth->dev,
- ring->rx_ring_size * sizeof(*ring->rx_dma),
- &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
- if (!ring->rx_dma)
- goto no_rx_mem;
-
- if (!eth->soc->rx_2b_offset)
- pad = NET_IP_ALIGN;
-
- for (i = 0; i < ring->rx_ring_size; i++) {
- dma_addr_t dma_addr = dma_map_single(eth->dev,
- ring->rx_data[i] + NET_SKB_PAD + pad,
- ring->rx_buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
- goto no_rx_mem;
- ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
-
- if (eth->soc->rx_sg_dma)
- ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
- else
- ring->rx_dma[i].rxd2 = RX_DMA_LSO;
- }
- ring->rx_calc_idx = ring->rx_ring_size - 1;
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- return 0;
-
-no_rx_mem:
- return -ENOMEM;
-}
-
-static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
-{
- if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- }
- if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
- dma_unmap_page(dev,
- dma_unmap_addr(tx_buf, dma_addr1),
- dma_unmap_len(tx_buf, dma_len1),
- DMA_TO_DEVICE);
-
- tx_buf->flags = 0;
- if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
- dev_kfree_skb_any(tx_buf->skb);
- tx_buf->skb = NULL;
-}
-
-static void mtk_pdma_tx_clean(struct mtk_eth *eth)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- int i;
-
- if (ring->tx_buf) {
- for (i = 0; i < ring->tx_ring_size; i++)
- mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
- kfree(ring->tx_buf);
- ring->tx_buf = NULL;
- }
-
- if (ring->tx_dma) {
- dma_free_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- ring->tx_dma,
- ring->tx_phys);
- ring->tx_dma = NULL;
- }
-}
-
-static void mtk_qdma_tx_clean(struct mtk_eth *eth)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- int i;
-
- if (ring->tx_buf) {
- for (i = 0; i < ring->tx_ring_size; i++)
- mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
- kfree(ring->tx_buf);
- ring->tx_buf = NULL;
- }
-
- if (ring->tx_dma) {
- dma_free_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- ring->tx_dma,
- ring->tx_phys);
- ring->tx_dma = NULL;
- }
-}
-
-void mtk_stats_update_mac(struct mtk_mac *mac)
-{
- struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
- u64 stats;
-
- base += hw_stats->reg_offset;
-
- u64_stats_update_begin(&hw_stats->syncp);
-
- if (mac->hw->soc->new_stats) {
- hw_stats->rx_bytes += mtk_r32(mac->hw, base);
- stats = mtk_r32(mac->hw, base + 0x04);
- if (stats)
- hw_stats->rx_bytes += (stats << 32);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x24);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
- stats = mtk_r32(mac->hw, base + 0x34);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
- } else {
- hw_stats->tx_bytes += mtk_r32(mac->hw, base);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
- hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x3c);
- }
-
- u64_stats_update_end(&hw_stats->syncp);
-}
-
-static void mtk_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *storage)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
- unsigned int start;
-
- if (!base) {
- netdev_stats_to_stats64(storage, &dev->stats);
- return;
- }
-
- if (netif_running(dev) && netif_device_present(dev)) {
- if (spin_trylock(&hw_stats->stats_lock)) {
- mtk_stats_update_mac(mac);
- spin_unlock(&hw_stats->stats_lock);
- }
- }
-
- do {
- start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
- storage->rx_packets = hw_stats->rx_packets;
- storage->tx_packets = hw_stats->tx_packets;
- storage->rx_bytes = hw_stats->rx_bytes;
- storage->tx_bytes = hw_stats->tx_bytes;
- storage->collisions = hw_stats->tx_collisions;
- storage->rx_length_errors = hw_stats->rx_short_errors +
- hw_stats->rx_long_errors;
- storage->rx_over_errors = hw_stats->rx_overflow;
- storage->rx_crc_errors = hw_stats->rx_fcs_errors;
- storage->rx_errors = hw_stats->rx_checksum_errors;
- storage->tx_aborted_errors = hw_stats->tx_skip;
- } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
-
- storage->tx_errors = dev->stats.tx_errors;
- storage->rx_dropped = dev->stats.rx_dropped;
- storage->tx_dropped = dev->stats.tx_dropped;
-}
-
-static int mtk_vlan_rx_add_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- u32 idx = (vid & 0xf);
- u32 vlan_cfg;
-
- if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
- return 0;
-
- if (test_bit(idx, &eth->vlan_map)) {
- netdev_warn(dev, "disable tx vlan offload\n");
- dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
- netdev_update_features(dev);
- } else {
- vlan_cfg = mtk_r32(eth,
- mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
- ((idx >> 1) << 2));
- if (idx & 0x1) {
- vlan_cfg &= 0xffff;
- vlan_cfg |= (vid << 16);
- } else {
- vlan_cfg &= 0xffff0000;
- vlan_cfg |= vid;
- }
- mtk_w32(eth,
- vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
- ((idx >> 1) << 2));
- set_bit(idx, &eth->vlan_map);
- }
-
- return 0;
-}
-
-static int mtk_vlan_rx_kill_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- u32 idx = (vid & 0xf);
-
- if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
- (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
- return 0;
-
- clear_bit(idx, &eth->vlan_map);
-
- return 0;
-}
-
-static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
-{
- barrier();
- return (u32)(ring->tx_ring_size -
- ((ring->tx_next_idx - ring->tx_free_idx) &
- (ring->tx_ring_size - 1)));
-}
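
mtk_pdma_empty_txd() above derives the number of free TX descriptors from the producer/consumer indices: the in-flight count is (tx_next_idx - tx_free_idx) masked to the power-of-two ring size, so index wrap-around needs no branch. A quick stand-alone check of that arithmetic; ring size and indices are made-up values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ring_size = 8;	/* must be a power of two */
		unsigned int next_idx = 2;	/* producer has wrapped past the end */
		unsigned int free_idx = 6;	/* consumer is still behind the wrap */
		unsigned int in_flight = (next_idx - free_idx) & (ring_size - 1);

		/* (2 - 6) & 7 == 4 descriptors in flight, so 4 remain free */
		printf("in flight: %u, free: %u\n", in_flight, ring_size - in_flight);
		return 0;
	}
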
-
-static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
-{
- unsigned int len;
- int ret;
-
- if (unlikely(skb->len >= VLAN_ETH_ZLEN))
- return 0;
-
- if (eth->soc->padding_64b && !eth->soc->padding_bug)
- return 0;
-
- if (skb_vlan_tag_present(skb))
- len = ETH_ZLEN;
- else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- len = VLAN_ETH_ZLEN;
- else if (!eth->soc->padding_64b)
- len = ETH_ZLEN;
- else
- return 0;
-
- if (skb->len >= len)
- return 0;
-
- ret = skb_pad(skb, len - skb->len);
- if (ret < 0)
- return ret;
- skb->len = len;
- skb_set_tail_pointer(skb, len);
-
- return ret;
-}
-
-static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
- int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct skb_frag_struct *frag;
- struct mtk_tx_dma txd, *ptxd;
- struct mtk_tx_buf *tx_buf;
- int i, j, k, frag_size, frag_map_size, offset;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
- u32 def_txd4;
-
- if (mtk_skb_padto(skb, eth)) {
- netif_warn(eth, tx_err, dev, "tx padding failed!\n");
- return -1;
- }
-
- tx_buf = &ring->tx_buf[ring->tx_next_idx];
- memset(tx_buf, 0, sizeof(*tx_buf));
- memset(&txd, 0, sizeof(txd));
- nr_frags = skb_shinfo(skb)->nr_frags;
-
- /* init tx descriptor */
- def_txd4 = eth->soc->txd4;
- txd.txd4 = def_txd4;
-
- if (eth->soc->mac_count > 1)
- txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
- if (gso)
- txd.txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd.txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb)) {
- u16 tag = skb_vlan_tag_get(skb);
-
- txd.txd4 |= TX_DMA_INS_VLAN |
- ((tag >> VLAN_PRIO_SHIFT) << 4) |
- (tag & 0xF);
- }
-
- mapped_addr = dma_map_single(&dev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- return -1;
-
- txd.txd1 = mapped_addr;
- txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
-
- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
- /* TX SG offload */
- j = ring->tx_next_idx;
- k = 0;
- for (i = 0; i < nr_frags; i++) {
- offset = 0;
- frag = &skb_shinfo(skb)->frags[i];
- frag_size = skb_frag_size(frag);
-
- while (frag_size > 0) {
- frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- goto err_dma;
-
- if (k & 0x1) {
- j = NEXT_TX_DESP_IDX(j);
- txd.txd1 = mapped_addr;
- txd.txd2 = TX_DMA_PLEN0(frag_map_size);
- txd.txd4 = def_txd4;
-
- tx_buf = &ring->tx_buf[j];
- memset(tx_buf, 0, sizeof(*tx_buf));
-
- tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
- dma_unmap_addr_set(tx_buf, dma_addr0,
- mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0,
- frag_map_size);
- } else {
- txd.txd3 = mapped_addr;
- txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
-
- tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
- tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
- dma_unmap_addr_set(tx_buf, dma_addr1,
- mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len1,
- frag_map_size);
-
- if (!((i == (nr_frags - 1)) &&
- (frag_map_size == frag_size))) {
- mtk_set_txd_pdma(&txd,
- &ring->tx_dma[j]);
- memset(&txd, 0, sizeof(txd));
- }
- }
- frag_size -= frag_map_size;
- offset += frag_map_size;
- k++;
- }
- }
-
- /* set last segment */
- if (k & 0x1)
- txd.txd2 |= TX_DMA_LS1;
- else
- txd.txd2 |= TX_DMA_LS0;
- mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);
-
- /* store skb to cleanup */
- tx_buf->skb = skb;
-
- netdev_sent_queue(dev, skb->len);
- skb_tx_timestamp(skb);
-
- ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
- atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
- !netdev_xmit_more())
- mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
-
- return 0;
-
-err_dma:
- j = ring->tx_next_idx;
- for (i = 0; i < tx_num; i++) {
- ptxd = &ring->tx_dma[j];
- tx_buf = &ring->tx_buf[j];
-
- /* unmap dma */
- mtk_txd_unmap(&dev->dev, tx_buf);
-
- ptxd->txd2 = TX_DMA_DESP2_DEF;
- j = NEXT_TX_DESP_IDX(j);
- }
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
- return -1;
-}
-
-/* the QDMA core needs scratch memory to be set up */
-static int mtk_init_fq_dma(struct mtk_eth *eth)
-{
- dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
- int cnt = eth->soc->dma_ring_size;
- int i;
-
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
- &phy_ring_head,
- GFP_ATOMIC | __GFP_ZERO);
- if (unlikely(!eth->scratch_ring))
- return -ENOMEM;
-
- eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
- GFP_KERNEL);
- dma_addr = dma_map_single(eth->dev,
- eth->scratch_head, cnt * QDMA_PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
- return -ENOMEM;
-
- memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
- phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
-
- for (i = 0; i < cnt; i++) {
- eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
- if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (phy_ring_head +
- ((i + 1) * sizeof(struct mtk_tx_dma)));
- eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
- }
-
- mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
- mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
-
- return 0;
-}
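
mtk_init_fq_dma() above links the QDMA scratch ("free queue") descriptors into a list the hardware walks on its own: txd1 of every descriptor points at its scratch page and txd2 at the physical address of the next descriptor, with head, tail and count programmed into the MTK_QDMA_FQ_* registers. A user-space sketch of the same linking arithmetic; addresses, page size and descriptor size are invented purely for illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SZ	2048u	/* stand-in for QDMA_PAGE_SIZE */
	#define DESC_SZ	16u	/* stand-in for sizeof(struct mtk_tx_dma) */

	int main(void)
	{
		uint32_t ring_head = 0x10000000;	/* assumed DMA address of the descriptor ring */
		uint32_t data_head = 0x20000000;	/* assumed DMA address of the scratch pages */
		unsigned int cnt = 4, i;

		for (i = 0; i < cnt; i++) {
			uint32_t txd1 = data_head + i * PAGE_SZ;	/* buffer pointer */
			uint32_t txd2 = (i < cnt - 1) ?
					ring_head + (i + 1) * DESC_SZ : 0;	/* next descriptor, 0 on the last */

			printf("desc %u: txd1=0x%08x txd2=0x%08x\n", i, txd1, txd2);
		}
		printf("head=0x%08x tail=0x%08x\n", ring_head, ring_head + (cnt - 1) * DESC_SZ);
		return 0;
	}
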
-
-static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
-{
- void *ret = ring->tx_dma;
-
- return ret + (desc - ring->tx_phys);
-}
-
-static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
-{
- return mtk_qdma_phys_to_virt(ring, txd->txd2);
-}
-
-static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
-{
- int idx = txd - ring->tx_dma;
-
- return &ring->tx_buf[idx];
-}
-
-static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
- int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_dma *itxd, *txd;
- struct mtk_tx_buf *tx_buf;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
- int i, n_desc = 1;
- u32 txd4 = eth->soc->txd4;
-
- itxd = ring->tx_next_free;
- if (itxd == ring->tx_last_free)
- return -ENOMEM;
-
- if (eth->soc->mac_count > 1)
- txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
- tx_buf = mtk_desc_to_tx_buf(ring, itxd);
- memset(tx_buf, 0, sizeof(*tx_buf));
-
- if (gso)
- txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb))
- txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);
-
- mapped_addr = dma_map_single(&dev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- return -ENOMEM;
-
- WRITE_ONCE(itxd->txd1, mapped_addr);
- tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
- /* TX SG offload */
- txd = itxd;
- nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- unsigned int offset = 0;
- int frag_size = skb_frag_size(frag);
-
- while (frag_size) {
- bool last_frag = false;
- unsigned int frag_map_size;
-
- txd = mtk_tx_next_qdma(ring, txd);
- if (txd == ring->tx_last_free)
- goto err_dma;
-
- n_desc++;
- frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
- goto err_dma;
-
- if (i == nr_frags - 1 &&
- (frag_size - frag_map_size) == 0)
- last_frag = true;
-
- WRITE_ONCE(txd->txd1, mapped_addr);
- WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
- TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0) |
- mac->id);
- WRITE_ONCE(txd->txd4, 0);
-
- tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
- memset(tx_buf, 0, sizeof(*tx_buf));
-
- tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
- frag_size -= frag_map_size;
- offset += frag_map_size;
- }
- }
-
- /* store skb to cleanup */
- tx_buf->skb = skb;
-
- WRITE_ONCE(itxd->txd4, txd4);
- WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
- (!nr_frags * TX_DMA_LS0)));
-
- netdev_sent_queue(dev, skb->len);
- skb_tx_timestamp(skb);
-
- ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
- atomic_sub(n_desc, &ring->tx_free_count);
-
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
- !netdev_xmit_more())
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
-
- return 0;
-
-err_dma:
- do {
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
-
- /* unmap dma */
- mtk_txd_unmap(&dev->dev, tx_buf);
-
- itxd->txd3 = TX_DMA_DESP2_DEF;
- itxd = mtk_tx_next_qdma(ring, itxd);
- } while (itxd != txd);
-
- return -ENOMEM;
-}
-
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
-{
- int i, nfrags;
- struct skb_frag_struct *frag;
-
- nfrags = 1;
- if (skb_is_gso(skb)) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
- }
- } else {
- nfrags += skb_shinfo(skb)->nr_frags;
- }
-
- return DIV_ROUND_UP(nfrags, 2);
-}
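
As a rough illustration of the descriptor budgeting above (a hypothetical sketch; the packet sizes are made up, TX_DMA_BUF_LEN is 0x3fff, and the final division by two reflects that each PDMA descriptor can carry up to two buffers, PLEN0/PLEN1):

        /* GSO skb: linear head plus one 32 KiB fragment */
        int nfrags = 1 + DIV_ROUND_UP(32768, TX_DMA_BUF_LEN); /* 1 + 3 = 4 */
        int ndescs = DIV_ROUND_UP(nfrags, 2);                 /* 2 descriptors */
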
-
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_ring *ring = &eth->tx_ring;
- struct net_device_stats *stats = &dev->stats;
- int tx_num;
- int len = skb->len;
- bool gso = false;
-
- tx_num = mtk_cal_txd_req(skb);
- if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
- netif_stop_queue(dev);
- netif_err(eth, tx_queued, dev,
- "Tx Ring full when queue awake!\n");
- return NETDEV_TX_BUSY;
- }
-
- /* TSO: fill MSS info in tcp checksum field */
- if (skb_is_gso(skb)) {
- if (skb_cow_head(skb, 0)) {
- netif_warn(eth, tx_err, dev,
- "GSO expand head fail.\n");
- goto drop;
- }
-
- if (skb_shinfo(skb)->gso_type &
- (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- gso = true;
- tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
- }
- }
-
- if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
- goto drop;
-
- stats->tx_packets++;
- stats->tx_bytes += len;
-
- if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
- netif_stop_queue(dev);
- smp_mb();
- if (unlikely(atomic_read(&ring->tx_free_count) >
- ring->tx_thresh))
- netif_wake_queue(dev);
- }
-
- return NETDEV_TX_OK;
-
-drop:
- stats->tx_dropped++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static int mtk_poll_rx(struct napi_struct *napi, int budget,
- struct mtk_eth *eth, u32 rx_intr)
-{
- struct mtk_soc_data *soc = eth->soc;
- struct mtk_rx_ring *ring = &eth->rx_ring[0];
- int idx = ring->rx_calc_idx;
- u32 checksum_bit;
- struct sk_buff *skb;
- u8 *data, *new_data;
- struct mtk_rx_dma *rxd, trxd;
- int done = 0, pad;
-
- if (eth->soc->hw_features & NETIF_F_RXCSUM)
- checksum_bit = soc->checksum_bit;
- else
- checksum_bit = 0;
-
- if (eth->soc->rx_2b_offset)
- pad = 0;
- else
- pad = NET_IP_ALIGN;
-
- while (done < budget) {
- struct net_device *netdev;
- unsigned int pktlen;
- dma_addr_t dma_addr;
- int mac = 0;
-
- idx = NEXT_RX_DESP_IDX(idx);
- rxd = &ring->rx_dma[idx];
- data = ring->rx_data[idx];
-
- mtk_get_rxd(&trxd, rxd);
- if (!(trxd.rxd2 & RX_DMA_DONE))
- break;
-
- /* find out which mac the packet comes from. values start at 1 */
- if (eth->soc->mac_count > 1) {
- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK;
- mac--;
- if (mac < 0 || mac >= eth->soc->mac_count)
- goto release_desc;
- }
-
- netdev = eth->netdev[mac];
-
- /* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
- if (unlikely(!new_data || !netdev)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(&netdev->dev,
- new_data + NET_SKB_PAD + pad,
- ring->rx_buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
- skb_free_frag(new_data);
- goto release_desc;
- }
-
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- put_page(virt_to_head_page(new_data));
- goto release_desc;
- }
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
- dma_unmap_single(&netdev->dev, trxd.rxd1,
- ring->rx_buf_size, DMA_FROM_DEVICE);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
- skb->dev = netdev;
- skb_put(skb, pktlen);
- if (trxd.rxd4 & checksum_bit)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
- skb->protocol = eth_type_trans(skb, netdev);
-
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += pktlen;
-
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- RX_DMA_VID(trxd.rxd3))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- RX_DMA_VID(trxd.rxd3));
- napi_gro_receive(napi, skb);
-
- ring->rx_data[idx] = new_data;
- rxd->rxd1 = (unsigned int)dma_addr;
-
-release_desc:
- if (eth->soc->rx_sg_dma)
- rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
- else
- rxd->rxd2 = RX_DMA_LSO;
-
- ring->rx_calc_idx = idx;
- /* make sure that all changes to the dma ring are flushed before
- * we continue
- */
- wmb();
- if (eth->soc->dma_type == MTK_QDMA)
- mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
- else
- mtk_reg_w32(eth, ring->rx_calc_idx,
- MTK_REG_RX_CALC_IDX0);
- done++;
- }
-
- if (done < budget)
- mtk_irq_ack(eth, rx_intr);
-
- return done;
-}
-
-static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
- struct sk_buff *skb;
- struct mtk_tx_buf *tx_buf;
- int done = 0;
- u32 idx, hwidx;
- struct mtk_tx_ring *ring = &eth->tx_ring;
- unsigned int bytes = 0;
-
- idx = ring->tx_free_idx;
- hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);
-
- while ((idx != hwidx) && budget) {
- tx_buf = &ring->tx_buf[idx];
- skb = tx_buf->skb;
-
- if (!skb)
- break;
-
- if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
- bytes += skb->len;
- done++;
- budget--;
- }
- mtk_txd_unmap(eth->dev, tx_buf);
- idx = NEXT_TX_DESP_IDX(idx);
- }
- ring->tx_free_idx = idx;
- atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
- /* read the hw index again to make sure no new tx packets arrived */
- if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
- *tx_again = 1;
-
- if (done)
- netdev_completed_queue(*eth->netdev, done, bytes);
-
- return done;
-}
-
-static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
- struct mtk_tx_buf *tx_buf;
- int total = 0, done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
- u32 cpu, dma;
- int i;
-
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
-
- cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
- dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
-
- desc = mtk_qdma_phys_to_virt(ring, cpu);
-
- while ((cpu != dma) && budget) {
- u32 next_cpu = desc->txd2;
- int mac;
-
- desc = mtk_tx_next_qdma(ring, desc);
- if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
- break;
-
- mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
- TX_DMA_FPORT_MASK;
- mac--;
-
- tx_buf = mtk_desc_to_tx_buf(ring, desc);
- skb = tx_buf->skb;
- if (!skb)
- break;
-
- if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
- bytes[mac] += skb->len;
- done[mac]++;
- budget--;
- }
- mtk_txd_unmap(eth->dev, tx_buf);
-
- ring->tx_last_free->txd2 = next_cpu;
- ring->tx_last_free = desc;
- atomic_inc(&ring->tx_free_count);
-
- cpu = next_cpu;
- }
-
- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
-
- /* read the hw index again to make sure no new tx packets arrived */
- if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
- *tx_again = true;
-
- for (i = 0; i < eth->soc->mac_count; i++) {
- if (!done[i])
- continue;
- netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
- total += done[i];
- }
-
- return total;
-}
-
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
- bool *tx_again)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- struct net_device *netdev = eth->netdev[0];
- int done;
-
- done = eth->tx_ring.tx_poll(eth, budget, tx_again);
- if (!*tx_again)
- mtk_irq_ack(eth, tx_intr);
-
- if (!done)
- return 0;
-
- smp_mb();
- if (unlikely(!netif_queue_stopped(netdev)))
- return done;
-
- if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
- netif_wake_queue(netdev);
-
- return done;
-}
-
-static void mtk_stats_update(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < eth->soc->mac_count; i++) {
- if (!eth->mac[i] || !eth->mac[i]->hw_stats)
- continue;
- if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
- mtk_stats_update_mac(eth->mac[i]);
- spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
- }
- }
-}
-
-static int mtk_poll(struct napi_struct *napi, int budget)
-{
- struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
- u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
- int tx_done, rx_done;
- bool tx_again = false;
-
- status = mtk_irq_pending(eth);
- mtk_status = mtk_irq_pending_status(eth);
- tx_intr = eth->soc->tx_int;
- rx_intr = eth->soc->rx_int;
- status_intr = eth->soc->status_int;
- tx_done = 0;
- rx_done = 0;
- tx_again = 0;
-
- if (status & tx_intr)
- tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);
-
- if (status & rx_intr)
- rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
-
- if (unlikely(mtk_status & status_intr)) {
- mtk_stats_update(eth);
- mtk_irq_ack_status(eth, status_intr);
- }
-
- if (unlikely(netif_msg_intr(eth))) {
- mask = mtk_irq_enabled(eth);
- netdev_info(eth->netdev[0],
- "done tx %d, rx %d, intr 0x%08x/0x%x\n",
- tx_done, rx_done, status, mask);
- }
-
- if (tx_again || rx_done == budget)
- return budget;
-
- status = mtk_irq_pending(eth);
- if (status & (tx_intr | rx_intr))
- return budget;
-
- napi_complete(napi);
- mtk_irq_enable(eth, tx_intr | rx_intr);
-
- return rx_done;
-}
-
-static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
-{
- int i;
- struct mtk_tx_ring *ring = &eth->tx_ring;
-
- ring->tx_ring_size = eth->soc->dma_ring_size;
- ring->tx_free_idx = 0;
- ring->tx_next_idx = 0;
- ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
- MAX_SKB_FRAGS);
-
- ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
- GFP_KERNEL);
- if (!ring->tx_buf)
- goto no_tx_mem;
-
- ring->tx_dma =
- dma_alloc_coherent(eth->dev,
- ring->tx_ring_size * sizeof(*ring->tx_dma),
- &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
- if (!ring->tx_dma)
- goto no_tx_mem;
-
- for (i = 0; i < ring->tx_ring_size; i++) {
- ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
- ring->tx_dma[i].txd4 = eth->soc->txd4;
- }
-
- atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
- ring->tx_map = mtk_pdma_tx_map;
- ring->tx_poll = mtk_pdma_tx_poll;
- ring->tx_clean = mtk_pdma_tx_clean;
-
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
- mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
- mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
- mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
-
- return 0;
-
-no_tx_mem:
- return -ENOMEM;
-}
-
-static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
-{
- struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = sizeof(*ring->tx_dma);
-
- ring->tx_ring_size = eth->soc->dma_ring_size;
- ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
- GFP_KERNEL);
- if (!ring->tx_buf)
- goto no_tx_mem;
-
- ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
- &ring->tx_phys,
- GFP_ATOMIC | __GFP_ZERO);
- if (!ring->tx_dma)
- goto no_tx_mem;
-
- for (i = 0; i < ring->tx_ring_size; i++) {
- int next = (i + 1) % ring->tx_ring_size;
- u32 next_ptr = ring->tx_phys + next * sz;
-
- ring->tx_dma[i].txd2 = next_ptr;
- ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
- }
-
- atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
- ring->tx_next_free = &ring->tx_dma[0];
- ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
- ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
- MAX_SKB_FRAGS);
-
- ring->tx_map = mtk_qdma_tx_map;
- ring->tx_poll = mtk_qdma_tx_poll;
- ring->tx_clean = mtk_qdma_tx_clean;
-
- /* make sure that all changes to the dma ring are flushed before we
- * continue
- */
- wmb();
-
- mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
- mtk_w32(eth,
- ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth,
- ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
- MTK_QTX_DRX_PTR);
-
- return 0;
-
-no_tx_mem:
- return -ENOMEM;
-}
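
The QDMA ring set up above is a circular singly linked list in DMA memory: each descriptor's txd2 holds the physical address of the next slot, and tx_last_free trails tx_next_free by two slots. A minimal sketch of walking the freshly initialised list (no hardware access, just the pointer arithmetic the helpers near the top of this file perform):

        struct mtk_tx_dma *txd = ring->tx_next_free;
        int n;

        /* one full lap through the txd2 links lands back on the start */
        for (n = 0; n < ring->tx_ring_size; n++)
                txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
        /* here txd == ring->tx_next_free again */
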
-
-static int mtk_qdma_init(struct mtk_eth *eth, int ring)
-{
- int err;
-
- err = mtk_init_fq_dma(eth);
- if (err)
- return err;
-
- err = mtk_qdma_tx_alloc_tx(eth);
- if (err)
- return err;
-
- err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
- if (err)
- return err;
-
- mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
- mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
- mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
- mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
-
- /* Enable random early drop and set drop threshold automatically */
- mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
-
- return 0;
-}
-
-static int mtk_pdma_qdma_init(struct mtk_eth *eth)
-{
- int err = mtk_qdma_init(eth, 1);
-
- if (err)
- return err;
-
- err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
- if (err)
- return err;
-
- mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
- mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
- mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
- mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
- return 0;
-}
-
-static int mtk_pdma_init(struct mtk_eth *eth)
-{
- struct mtk_rx_ring *ring = &eth->rx_ring[0];
- int err;
-
- err = mtk_pdma_tx_alloc(eth);
- if (err)
- return err;
-
- err = mtk_dma_rx_alloc(eth, ring);
- if (err)
- return err;
-
- mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
- mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
- mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
- mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
- return 0;
-}
-
-static void mtk_dma_free(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < eth->soc->mac_count; i++)
- if (eth->netdev[i])
- netdev_reset_queue(eth->netdev[i]);
- eth->tx_ring.tx_clean(eth);
- mtk_clean_rx(eth, &eth->rx_ring[0]);
- mtk_clean_rx(eth, &eth->rx_ring[1]);
- kfree(eth->scratch_head);
-}
-
-static void mtk_tx_timeout(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_ring *ring = &eth->tx_ring;
-
- eth->netdev[mac->id]->stats.tx_errors++;
- netif_err(eth, tx_err, dev,
- "transmit timed out\n");
- if (eth->soc->dma_type & MTK_PDMA) {
- netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
- mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
- netif_info(eth, drv, dev,
- "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
- 0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
- mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
- mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
- mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
- ring->tx_free_idx,
- ring->tx_next_idx);
- }
- if (eth->soc->dma_type & MTK_QDMA) {
- netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
- mtk_r32(eth, MTK_QDMA_GLO_CFG));
- netif_info(eth, drv, dev,
- "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
- 0, mtk_r32(eth, MTK_QTX_CTX_PTR),
- mtk_r32(eth, MTK_QTX_DTX_PTR),
- mtk_r32(eth, MTK_QTX_CRX_PTR),
- mtk_r32(eth, MTK_QTX_DRX_PTR),
- atomic_read(&ring->tx_free_count));
- }
- netif_info(eth, drv, dev,
- "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
- 0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
- mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
- mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
- mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));
-
- schedule_work(&mac->pending_work);
-}
-
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
-{
- struct mtk_eth *eth = _eth;
- u32 status, int_mask;
-
- status = mtk_irq_pending(eth);
- if (unlikely(!status))
- return IRQ_NONE;
-
- int_mask = (eth->soc->rx_int | eth->soc->tx_int);
- if (likely(status & int_mask)) {
- if (likely(napi_schedule_prep(&eth->rx_napi)))
- __napi_schedule(&eth->rx_napi);
- } else {
- mtk_irq_ack(eth, status);
- }
- mtk_irq_disable(eth, int_mask);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mtk_poll_controller(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;
-
- mtk_irq_disable(eth, int_mask);
- mtk_handle_irq(dev->irq, dev);
- mtk_irq_enable(eth, int_mask);
-}
-#endif
-
-int mtk_set_clock_cycle(struct mtk_eth *eth)
-{
- unsigned long sysclk = eth->sysclk;
-
- sysclk /= MTK_US_CYC_CNT_DIVISOR;
- sysclk <<= MTK_US_CYC_CNT_SHIFT;
-
- mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
- ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
- sysclk,
- MTK_GLO_CFG);
- return 0;
-}
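
A worked example of the calibration above, assuming a hypothetical 50 MHz system clock (the field written into MTK_GLO_CFG is simply the number of clock cycles per microsecond):

        unsigned long sysclk = 50000000;        /* assumed 50 MHz sysclk */
        sysclk /= MTK_US_CYC_CNT_DIVISOR;       /* 50 cycles per microsecond */
        sysclk <<= MTK_US_CYC_CNT_SHIFT;        /* 50 << 8 == 0x3200 */
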
-
-void mtk_fwd_config(struct mtk_eth *eth)
-{
- u32 fwd_cfg;
-
- fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-
- /* disable jumbo frame */
- if (eth->soc->jumbo_frame)
- fwd_cfg &= ~MTK_GDM1_JMB_EN;
-
- /* set unicast/multicast/broadcast frame to cpu */
- fwd_cfg &= ~0xffff;
-
- mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-}
-
-void mtk_csum_config(struct mtk_eth *eth)
-{
- if (eth->soc->hw_features & NETIF_F_RXCSUM)
- mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
- (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
- MTK_GDMA1_FWD_CFG);
- else
- mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
- ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
- MTK_GDMA1_FWD_CFG);
- if (eth->soc->hw_features & NETIF_F_IP_CSUM)
- mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
- (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
- MTK_CDMA_CSG_CFG);
- else
- mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
- ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
- MTK_CDMA_CSG_CFG);
-}
-
-static int mtk_start_dma(struct mtk_eth *eth)
-{
- unsigned long flags;
- u32 val;
- int err;
-
- if (eth->soc->dma_type == MTK_PDMA)
- err = mtk_pdma_init(eth);
- else if (eth->soc->dma_type == MTK_QDMA)
- err = mtk_qdma_init(eth, 0);
- else
- err = mtk_pdma_qdma_init(eth);
- if (err) {
- mtk_dma_free(eth);
- return err;
- }
-
- spin_lock_irqsave(&eth->page_lock, flags);
-
- val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
- if (eth->soc->rx_2b_offset)
- val |= MTK_RX_2B_OFFSET;
- val |= eth->soc->pdma_glo_cfg;
-
- if (eth->soc->dma_type & MTK_PDMA)
- mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);
-
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_w32(eth, val, MTK_QDMA_GLO_CFG);
-
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
- return 0;
-}
-
-static int mtk_open(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
-
- if (!atomic_read(&eth->dma_refcnt)) {
- int err = mtk_start_dma(eth);
-
- if (err)
- return err;
-
- napi_enable(&eth->rx_napi);
- mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
- }
- atomic_inc(&eth->dma_refcnt);
-
- if (eth->phy)
- eth->phy->start(mac);
-
- if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
- netif_carrier_on(dev);
-
- netif_start_queue(dev);
- eth->soc->fwd_config(eth);
-
- return 0;
-}
-
-static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
-{
- unsigned long flags;
- u32 val;
- int i;
-
- /* stop the dma engine */
- spin_lock_irqsave(&eth->page_lock, flags);
- val = mtk_r32(eth, glo_cfg);
- mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
- glo_cfg);
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
- /* wait for dma stop */
- for (i = 0; i < 10; i++) {
- val = mtk_r32(eth, glo_cfg);
- if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
- msleep(20);
- continue;
- }
- break;
- }
-}
-
-static int mtk_stop(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- netif_tx_disable(dev);
- if (eth->phy)
- eth->phy->stop(mac);
-
- if (!atomic_dec_and_test(&eth->dma_refcnt))
- return 0;
-
- mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
- napi_disable(&eth->rx_napi);
-
- if (eth->soc->dma_type & MTK_PDMA)
- mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);
-
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
-
- mtk_dma_free(eth);
-
- return 0;
-}
-
-static int __init mtk_init_hw(struct mtk_eth *eth)
-{
- int i, err;
-
- eth->soc->reset_fe(eth);
-
- if (eth->soc->switch_init)
- if (eth->soc->switch_init(eth)) {
- dev_err(eth->dev, "failed to initialize switch core\n");
- return -ENODEV;
- }
-
- err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
- dev_name(eth->dev), eth);
- if (err)
- return err;
-
- err = mtk_mdio_init(eth);
- if (err)
- return err;
-
- /* disable delay and normal interrupt */
- mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
- if (eth->soc->dma_type & MTK_QDMA)
- mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-
- /* frame engine will push the VLAN tag according to the VIDX field in the Tx desc */
- if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
- for (i = 0; i < 16; i += 2)
- mtk_w32(eth, ((i + 1) << 16) + i,
- mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
- (i * 2));
-
- if (eth->soc->fwd_config(eth))
- dev_err(eth->dev, "unable to get clock\n");
-
- if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
- mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
- mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
- }
-
- return 0;
-}
-
-static int __init mtk_init(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct device_node *port;
- const char *mac_addr;
- int err;
-
- mac_addr = of_get_mac_address(mac->of_node);
- if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
-
- /* If the mac address is invalid, use random mac address */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- eth_hw_addr_random(dev);
- dev_err(eth->dev, "generated random MAC address %pM\n",
- dev->dev_addr);
- }
- mac->hw->soc->set_mac(mac, dev->dev_addr);
-
- if (eth->soc->port_init)
- for_each_child_of_node(mac->of_node, port)
- if (of_device_is_compatible(port,
- "mediatek,eth-port") &&
- of_device_is_available(port))
- eth->soc->port_init(eth, mac, port);
-
- if (eth->phy) {
- err = eth->phy->connect(mac);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static void mtk_uninit(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
-
- if (eth->phy)
- eth->phy->disconnect(mac);
- mtk_mdio_cleanup(eth);
-
- mtk_irq_disable(eth, ~0);
- free_irq(dev->irq, dev);
-}
-
-static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct mtk_mac *mac = netdev_priv(dev);
-
- if (!mac->phy_dev)
- return -ENODEV;
-
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
- default:
- break;
- }
-
- return -EOPNOTSUPP;
-}
-
-static int mtk_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- int frag_size, old_mtu;
- u32 fwd_cfg;
-
- if (!eth->soc->jumbo_frame)
- return eth_change_mtu(dev, new_mtu);
-
- frag_size = mtk_max_frag_size(new_mtu);
- if (new_mtu < 68 || frag_size > PAGE_SIZE)
- return -EINVAL;
-
- old_mtu = dev->mtu;
- dev->mtu = new_mtu;
-
- /* return early if the buffer sizes will not change */
- if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
- return 0;
- if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
- return 0;
-
- if (new_mtu <= ETH_DATA_LEN)
- eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
- else
- eth->rx_ring[0].frag_size = PAGE_SIZE;
- eth->rx_ring[0].rx_buf_size =
- mtk_max_buf_size(eth->rx_ring[0].frag_size);
-
- if (!netif_running(dev))
- return 0;
-
- mtk_stop(dev);
- fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
- if (new_mtu <= ETH_DATA_LEN) {
- fwd_cfg &= ~MTK_GDM1_JMB_EN;
- } else {
- fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
- fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
- MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
- }
- mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-
- return mtk_open(dev);
-}
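
To make the jumbo length encoding above concrete (a sketch with made-up numbers; the real frag_size comes from mtk_max_frag_size() and includes skb overhead), a frag_size of roughly 4.3 KiB programs the GDM to accept frames of up to five 1 KiB units:

        /* hypothetical: frag_size == 4352 bytes */
        fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
        fwd_cfg |= (DIV_ROUND_UP(4352, 1024) << MTK_GDM1_JMB_LEN_SHIFT) | /* 5 */
                   MTK_GDM1_JMB_EN;
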
-
-static void mtk_pending_work(struct work_struct *work)
-{
- struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
- struct mtk_eth *eth = mac->hw;
- struct net_device *dev = eth->netdev[mac->id];
- int err;
-
- rtnl_lock();
- mtk_stop(dev);
-
- err = mtk_open(dev);
- if (err) {
- netif_alert(eth, ifup, dev,
- "Driver up/down cycle failed, closing device.\n");
- dev_close(dev);
- }
- rtnl_unlock();
-}
-
-static int mtk_cleanup(struct mtk_eth *eth)
-{
- int i;
-
- for (i = 0; i < eth->soc->mac_count; i++) {
- struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
- if (!eth->netdev[i])
- continue;
-
- unregister_netdev(eth->netdev[i]);
- free_netdev(eth->netdev[i]);
- cancel_work_sync(&mac->pending_work);
- }
-
- return 0;
-}
-
-static const struct net_device_ops mtk_netdev_ops = {
- .ndo_init = mtk_init,
- .ndo_uninit = mtk_uninit,
- .ndo_open = mtk_open,
- .ndo_stop = mtk_stop,
- .ndo_start_xmit = mtk_start_xmit,
- .ndo_set_mac_address = mtk_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = mtk_do_ioctl,
- .ndo_change_mtu = mtk_change_mtu,
- .ndo_tx_timeout = mtk_tx_timeout,
- .ndo_get_stats64 = mtk_get_stats64,
- .ndo_vlan_rx_add_vid = mtk_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = mtk_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mtk_poll_controller,
-#endif
-};
-
-static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
-{
- struct mtk_mac *mac;
- const __be32 *_id = of_get_property(np, "reg", NULL);
- int id, err;
-
- if (!_id) {
- dev_err(eth->dev, "missing mac id\n");
- return -EINVAL;
- }
- id = be32_to_cpup(_id);
- if (id >= eth->soc->mac_count || eth->netdev[id]) {
- dev_err(eth->dev, "%d is not a valid mac id\n", id);
- return -EINVAL;
- }
-
- eth->netdev[id] = alloc_etherdev(sizeof(*mac));
- if (!eth->netdev[id]) {
- dev_err(eth->dev, "alloc_etherdev failed\n");
- return -ENOMEM;
- }
- mac = netdev_priv(eth->netdev[id]);
- eth->mac[id] = mac;
- mac->id = id;
- mac->hw = eth;
- mac->of_node = np;
- INIT_WORK(&mac->pending_work, mtk_pending_work);
-
- if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
- mac->hw_stats = devm_kzalloc(eth->dev,
- sizeof(*mac->hw_stats),
- GFP_KERNEL);
- if (!mac->hw_stats) {
- err = -ENOMEM;
- goto free_netdev;
- }
- spin_lock_init(&mac->hw_stats->stats_lock);
- mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
- }
-
- SET_NETDEV_DEV(eth->netdev[id], eth->dev);
- eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
- eth->netdev[id]->base_addr = (unsigned long)eth->base;
-
- if (eth->soc->init_data)
- eth->soc->init_data(eth->soc, eth->netdev[id]);
-
- eth->netdev[id]->vlan_features = eth->soc->hw_features &
- ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
- eth->netdev[id]->features |= eth->soc->hw_features;
-
- if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
- eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
- mtk_set_ethtool_ops(eth->netdev[id]);
-
- err = register_netdev(eth->netdev[id]);
- if (err) {
- dev_err(eth->dev, "error bringing up device\n");
- err = -ENOMEM;
- goto free_netdev;
- }
- eth->netdev[id]->irq = eth->irq;
- netif_info(eth, probe, eth->netdev[id],
- "mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[id]->base_addr, eth->netdev[id]->irq);
-
- return 0;
-
-free_netdev:
- free_netdev(eth->netdev[id]);
- return err;
-}
-
-static int mtk_probe(struct platform_device *pdev)
-{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- const struct of_device_id *match;
- struct device_node *mac_np;
- struct mtk_soc_data *soc;
- struct mtk_eth *eth;
- struct clk *sysclk;
- int err;
-
- device_reset(&pdev->dev);
-
- match = of_match_device(of_mtk_match, &pdev->dev);
- soc = (struct mtk_soc_data *)match->data;
-
- if (soc->reg_table)
- mtk_reg_table = soc->reg_table;
-
- eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
- if (!eth)
- return -ENOMEM;
-
- eth->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(eth->base))
- return PTR_ERR(eth->base);
-
- spin_lock_init(&eth->page_lock);
-
- eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,ethsys");
- if (IS_ERR(eth->ethsys))
- return PTR_ERR(eth->ethsys);
-
- eth->irq = platform_get_irq(pdev, 0);
- if (eth->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENXIO;
- }
-
- sysclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sysclk)) {
- dev_err(&pdev->dev,
- "the clock is not defined in the devicetree\n");
- return -ENXIO;
- }
- eth->sysclk = clk_get_rate(sysclk);
-
- eth->switch_np = of_parse_phandle(pdev->dev.of_node,
- "mediatek,switch", 0);
- if (soc->has_switch && !eth->switch_np) {
- dev_err(&pdev->dev, "failed to read switch phandle\n");
- return -ENODEV;
- }
-
- eth->dev = &pdev->dev;
- eth->soc = soc;
- eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
-
- err = mtk_init_hw(eth);
- if (err)
- return err;
-
- if (eth->soc->mac_count > 1) {
- for_each_child_of_node(pdev->dev.of_node, mac_np) {
- if (!of_device_is_compatible(mac_np,
- "mediatek,eth-mac"))
- continue;
-
- if (!of_device_is_available(mac_np))
- continue;
-
- err = mtk_add_mac(eth, mac_np);
- if (err)
- goto err_free_dev;
- }
-
- init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
- soc->napi_weight);
- } else {
- err = mtk_add_mac(eth, pdev->dev.of_node);
- if (err)
- goto err_free_dev;
- netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
- soc->napi_weight);
- }
-
- platform_set_drvdata(pdev, eth);
-
- return 0;
-
-err_free_dev:
- mtk_cleanup(eth);
- return err;
-}
-
-static int mtk_remove(struct platform_device *pdev)
-{
- struct mtk_eth *eth = platform_get_drvdata(pdev);
-
- netif_napi_del(&eth->rx_napi);
- mtk_cleanup(eth);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static struct platform_driver mtk_driver = {
- .probe = mtk_probe,
- .remove = mtk_remove,
- .driver = {
- .name = "mtk_soc_eth",
- .of_match_table = of_mtk_match,
- },
-};
-
-module_platform_driver(mtk_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644
index e6ed80433f49..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.h
+++ /dev/null
@@ -1,716 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETH_H
-#define MTK_ETH_H
-
-#include <linux/mii.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/ethtool.h>
-#include <linux/version.h>
-#include <linux/atomic.h>
-
-/* these registers have different offsets depending on the SoC. we use a lookup
- * table for these
- */
-enum mtk_reg {
- MTK_REG_PDMA_GLO_CFG = 0,
- MTK_REG_PDMA_RST_CFG,
- MTK_REG_DLY_INT_CFG,
- MTK_REG_TX_BASE_PTR0,
- MTK_REG_TX_MAX_CNT0,
- MTK_REG_TX_CTX_IDX0,
- MTK_REG_TX_DTX_IDX0,
- MTK_REG_RX_BASE_PTR0,
- MTK_REG_RX_MAX_CNT0,
- MTK_REG_RX_CALC_IDX0,
- MTK_REG_RX_DRX_IDX0,
- MTK_REG_MTK_INT_ENABLE,
- MTK_REG_MTK_INT_STATUS,
- MTK_REG_MTK_DMA_VID_BASE,
- MTK_REG_MTK_COUNTER_BASE,
- MTK_REG_MTK_RST_GL,
- MTK_REG_MTK_INT_STATUS2,
- MTK_REG_COUNT
-};
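
The common code reads and writes these registers through thin wrappers that index the per-SoC table installed at probe time (mtk_reg_table). Those helpers live elsewhere in the driver and are not part of this hunk; a sketch of what they boil down to:

static inline void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
{
        mtk_w32(eth, val, mtk_reg_table[reg]);
}

static inline u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
{
        return mtk_r32(eth, mtk_reg_table[reg]);
}
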
-
-/* delayed interrupt bits */
-#define MTK_DELAY_EN_INT 0x80
-#define MTK_DELAY_MAX_INT 0x04
-#define MTK_DELAY_MAX_TOUT 0x04
-#define MTK_DELAY_TIME 20
-#define MTK_DELAY_CHAN (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
- | MTK_DELAY_MAX_TOUT)
-#define MTK_DELAY_INIT ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
-#define MTK_PSE_FQFC_CFG_INIT 0x80504000
-#define MTK_PSE_FQFC_CFG_256Q 0xff908000
-
-/* interrupt bits */
-#define MTK_CNT_PPE_AF BIT(31)
-#define MTK_CNT_GDM_AF BIT(29)
-#define MTK_PSE_P2_FC BIT(26)
-#define MTK_PSE_BUF_DROP BIT(24)
-#define MTK_GDM_OTHER_DROP BIT(23)
-#define MTK_PSE_P1_FC BIT(22)
-#define MTK_PSE_P0_FC BIT(21)
-#define MTK_PSE_FQ_EMPTY BIT(20)
-#define MTK_GE1_STA_CHG BIT(18)
-#define MTK_TX_COHERENT BIT(17)
-#define MTK_RX_COHERENT BIT(16)
-#define MTK_TX_DONE_INT3 BIT(11)
-#define MTK_TX_DONE_INT2 BIT(10)
-#define MTK_TX_DONE_INT1 BIT(9)
-#define MTK_TX_DONE_INT0 BIT(8)
-#define MTK_RX_DONE_INT0 BIT(2)
-#define MTK_TX_DLY_INT BIT(1)
-#define MTK_RX_DLY_INT BIT(0)
-
-#define MTK_RX_DONE_INT MTK_RX_DONE_INT0
-#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
- MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
-
-#define RT5350_RX_DLY_INT BIT(30)
-#define RT5350_TX_DLY_INT BIT(28)
-#define RT5350_RX_DONE_INT1 BIT(17)
-#define RT5350_RX_DONE_INT0 BIT(16)
-#define RT5350_TX_DONE_INT3 BIT(3)
-#define RT5350_TX_DONE_INT2 BIT(2)
-#define RT5350_TX_DONE_INT1 BIT(1)
-#define RT5350_TX_DONE_INT0 BIT(0)
-
-#define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
-#define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
- RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
-
-/* registers */
-#define MTK_GDMA_OFFSET 0x0020
-#define MTK_PSE_OFFSET 0x0040
-#define MTK_GDMA2_OFFSET 0x0060
-#define MTK_CDMA_OFFSET 0x0080
-#define MTK_DMA_VID0 0x00a8
-#define MTK_PDMA_OFFSET 0x0100
-#define MTK_PPE_OFFSET 0x0200
-#define MTK_CMTABLE_OFFSET 0x0400
-#define MTK_POLICYTABLE_OFFSET 0x1000
-
-#define MT7621_GDMA_OFFSET 0x0500
-#define MT7620_GDMA_OFFSET 0x0600
-
-#define RT5350_PDMA_OFFSET 0x0800
-#define RT5350_SDM_OFFSET 0x0c00
-
-#define MTK_MDIO_ACCESS 0x00
-#define MTK_MDIO_CFG 0x04
-#define MTK_GLO_CFG 0x08
-#define MTK_RST_GL 0x0C
-#define MTK_INT_STATUS 0x10
-#define MTK_INT_ENABLE 0x14
-#define MTK_MDIO_CFG2 0x18
-#define MTK_FOC_TS_T 0x1C
-
-#define MTK_GDMA1_FWD_CFG (MTK_GDMA_OFFSET + 0x00)
-#define MTK_GDMA1_SCH_CFG (MTK_GDMA_OFFSET + 0x04)
-#define MTK_GDMA1_SHPR_CFG (MTK_GDMA_OFFSET + 0x08)
-#define MTK_GDMA1_MAC_ADRL (MTK_GDMA_OFFSET + 0x0C)
-#define MTK_GDMA1_MAC_ADRH (MTK_GDMA_OFFSET + 0x10)
-
-#define MTK_GDMA2_FWD_CFG (MTK_GDMA2_OFFSET + 0x00)
-#define MTK_GDMA2_SCH_CFG (MTK_GDMA2_OFFSET + 0x04)
-#define MTK_GDMA2_SHPR_CFG (MTK_GDMA2_OFFSET + 0x08)
-#define MTK_GDMA2_MAC_ADRL (MTK_GDMA2_OFFSET + 0x0C)
-#define MTK_GDMA2_MAC_ADRH (MTK_GDMA2_OFFSET + 0x10)
-
-#define MTK_PSE_FQ_CFG (MTK_PSE_OFFSET + 0x00)
-#define MTK_CDMA_FC_CFG (MTK_PSE_OFFSET + 0x04)
-#define MTK_GDMA1_FC_CFG (MTK_PSE_OFFSET + 0x08)
-#define MTK_GDMA2_FC_CFG (MTK_PSE_OFFSET + 0x0C)
-
-#define MTK_CDMA_CSG_CFG (MTK_CDMA_OFFSET + 0x00)
-#define MTK_CDMA_SCH_CFG (MTK_CDMA_OFFSET + 0x04)
-
-#define MT7621_GDMA_FWD_CFG(x) (MT7621_GDMA_OFFSET + (x * 0x1000))
-
-/* FIXME this might be different for different SOCs */
-#define MT7620_GDMA1_FWD_CFG (MT7621_GDMA_OFFSET + 0x00)
-
-#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00)
-#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04)
-#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08)
-#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C)
-#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10)
-#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14)
-#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18)
-#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C)
-#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20)
-#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24)
-#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28)
-#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C)
-#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30)
-#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34)
-#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38)
-#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C)
-#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100)
-#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104)
-#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108)
-#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C)
-#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110)
-#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114)
-#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118)
-#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C)
-#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204)
-#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208)
-#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c)
-#define RT5350_MTK_INT_STATUS (RT5350_PDMA_OFFSET + 0x220)
-#define RT5350_MTK_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228)
-#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280)
-
-#define MTK_PDMA_GLO_CFG (MTK_PDMA_OFFSET + 0x00)
-#define MTK_PDMA_RST_CFG (MTK_PDMA_OFFSET + 0x04)
-#define MTK_PDMA_SCH_CFG (MTK_PDMA_OFFSET + 0x08)
-#define MTK_DLY_INT_CFG (MTK_PDMA_OFFSET + 0x0C)
-#define MTK_TX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x10)
-#define MTK_TX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x14)
-#define MTK_TX_CTX_IDX0 (MTK_PDMA_OFFSET + 0x18)
-#define MTK_TX_DTX_IDX0 (MTK_PDMA_OFFSET + 0x1C)
-#define MTK_TX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x20)
-#define MTK_TX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x24)
-#define MTK_TX_CTX_IDX1 (MTK_PDMA_OFFSET + 0x28)
-#define MTK_TX_DTX_IDX1 (MTK_PDMA_OFFSET + 0x2C)
-#define MTK_RX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x30)
-#define MTK_RX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x34)
-#define MTK_RX_CALC_IDX0 (MTK_PDMA_OFFSET + 0x38)
-#define MTK_RX_DRX_IDX0 (MTK_PDMA_OFFSET + 0x3C)
-#define MTK_TX_BASE_PTR2 (MTK_PDMA_OFFSET + 0x40)
-#define MTK_TX_MAX_CNT2 (MTK_PDMA_OFFSET + 0x44)
-#define MTK_TX_CTX_IDX2 (MTK_PDMA_OFFSET + 0x48)
-#define MTK_TX_DTX_IDX2 (MTK_PDMA_OFFSET + 0x4C)
-#define MTK_TX_BASE_PTR3 (MTK_PDMA_OFFSET + 0x50)
-#define MTK_TX_MAX_CNT3 (MTK_PDMA_OFFSET + 0x54)
-#define MTK_TX_CTX_IDX3 (MTK_PDMA_OFFSET + 0x58)
-#define MTK_TX_DTX_IDX3 (MTK_PDMA_OFFSET + 0x5C)
-#define MTK_RX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x60)
-#define MTK_RX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x64)
-#define MTK_RX_CALC_IDX1 (MTK_PDMA_OFFSET + 0x68)
-#define MTK_RX_DRX_IDX1 (MTK_PDMA_OFFSET + 0x6C)
-
-/* Switch DMA configuration */
-#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00)
-#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04)
-#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08)
-#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C)
-#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10)
-#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100)
-#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104)
-#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108)
-#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C)
-#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110)
-
-#define RT5350_SDM_ICS_EN BIT(16)
-#define RT5350_SDM_TCS_EN BIT(17)
-#define RT5350_SDM_UCS_EN BIT(18)
-
-/* QDMA registers */
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
-#define MTK_QRX_BASE_PTR0 0x1900
-#define MTK_QRX_MAX_CNT0 0x1904
-#define MTK_QRX_CRX_IDX0 0x1908
-#define MTK_QRX_DRX_IDX0 0x190C
-#define MTK_QDMA_GLO_CFG 0x1A04
-#define MTK_QDMA_RST_IDX 0x1A08
-#define MTK_QDMA_DELAY_INT 0x1A0C
-#define MTK_QDMA_FC_THRES 0x1A10
-#define MTK_QMTK_INT_STATUS 0x1A18
-#define MTK_QMTK_INT_ENABLE 0x1A1C
-#define MTK_QDMA_HRED2 0x1A44
-
-#define MTK_QTX_CTX_PTR 0x1B00
-#define MTK_QTX_DTX_PTR 0x1B04
-
-#define MTK_QTX_CRX_PTR 0x1B10
-#define MTK_QTX_DRX_PTR 0x1B14
-
-#define MTK_QDMA_FQ_HEAD 0x1B20
-#define MTK_QDMA_FQ_TAIL 0x1B24
-#define MTK_QDMA_FQ_CNT 0x1B28
-#define MTK_QDMA_FQ_BLEN 0x1B2C
-
-#define QDMA_PAGE_SIZE 2048
-#define QDMA_TX_OWNER_CPU BIT(31)
-#define QDMA_TX_SWC BIT(14)
-#define TX_QDMA_SDL(_x) (((_x) & 0x3fff) << 16)
-#define QDMA_RES_THRES 4
-
-/* MDIO_CFG register bits */
-#define MTK_MDIO_CFG_AUTO_POLL_EN BIT(29)
-#define MTK_MDIO_CFG_GP1_BP_EN BIT(16)
-#define MTK_MDIO_CFG_GP1_FRC_EN BIT(15)
-#define MTK_MDIO_CFG_GP1_SPEED_10 (0 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_100 (1 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_1000 (2 << 13)
-#define MTK_MDIO_CFG_GP1_DUPLEX BIT(12)
-#define MTK_MDIO_CFG_GP1_FC_TX BIT(11)
-#define MTK_MDIO_CFG_GP1_FC_RX BIT(10)
-#define MTK_MDIO_CFG_GP1_LNK_DWN BIT(9)
-#define MTK_MDIO_CFG_GP1_AN_FAIL BIT(8)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6)
-#define MTK_MDIO_CFG_TURBO_MII_FREQ BIT(5)
-#define MTK_MDIO_CFG_TURBO_MII_MODE BIT(4)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2)
-#define MTK_MDIO_CFG_TX_CLK_SKEW_0 0
-#define MTK_MDIO_CFG_TX_CLK_SKEW_200 1
-#define MTK_MDIO_CFG_TX_CLK_SKEW_400 2
-#define MTK_MDIO_CFG_TX_CLK_SKEW_INV 3
-
-/* uni-cast port */
-#define MTK_GDM1_JMB_LEN_MASK 0xf
-#define MTK_GDM1_JMB_LEN_SHIFT 28
-#define MTK_GDM1_ICS_EN BIT(22)
-#define MTK_GDM1_TCS_EN BIT(21)
-#define MTK_GDM1_UCS_EN BIT(20)
-#define MTK_GDM1_JMB_EN BIT(19)
-#define MTK_GDM1_STRPCRC BIT(16)
-#define MTK_GDM1_UFRC_P_CPU (0 << 12)
-#define MTK_GDM1_UFRC_P_GDMA1 (1 << 12)
-#define MTK_GDM1_UFRC_P_PPE (6 << 12)
-
-/* checksums */
-#define MTK_ICS_GEN_EN BIT(2)
-#define MTK_UCS_GEN_EN BIT(1)
-#define MTK_TCS_GEN_EN BIT(0)
-
-/* dma mode */
-#define MTK_PDMA BIT(0)
-#define MTK_QDMA BIT(1)
-#define MTK_PDMA_RX_QDMA_TX (MTK_PDMA | MTK_QDMA)
-
-/* dma ring */
-#define MTK_PST_DRX_IDX0 BIT(16)
-#define MTK_PST_DTX_IDX3 BIT(3)
-#define MTK_PST_DTX_IDX2 BIT(2)
-#define MTK_PST_DTX_IDX1 BIT(1)
-#define MTK_PST_DTX_IDX0 BIT(0)
-
-#define MTK_RX_2B_OFFSET BIT(31)
-#define MTK_TX_WB_DDONE BIT(6)
-#define MTK_RX_DMA_BUSY BIT(3)
-#define MTK_TX_DMA_BUSY BIT(1)
-#define MTK_RX_DMA_EN BIT(2)
-#define MTK_TX_DMA_EN BIT(0)
-
-#define MTK_PDMA_SIZE_4DWORDS (0 << 4)
-#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
-#define MTK_PDMA_SIZE_16DWORDS (2 << 4)
-
-#define MTK_US_CYC_CNT_MASK 0xff
-#define MTK_US_CYC_CNT_SHIFT 0x8
-#define MTK_US_CYC_CNT_DIVISOR 1000000
-
-/* PDMA descriptor rxd2 */
-#define RX_DMA_DONE BIT(31)
-#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
-#define RX_DMA_TAG BIT(15)
-
-/* PDMA descriptor rxd3 */
-#define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff)
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
-
-/* PDMA descriptor rxd4 */
-#define RX_DMA_L4VALID BIT(30)
-#define RX_DMA_FPORT_SHIFT 19
-#define RX_DMA_FPORT_MASK 0x7
-
-struct mtk_rx_dma {
- unsigned int rxd1;
- unsigned int rxd2;
- unsigned int rxd3;
- unsigned int rxd4;
-} __packed __aligned(4);
-
-/* PDMA tx descriptor bits */
-#define TX_DMA_BUF_LEN 0x3fff
-#define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16)
-#define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_LS1 BIT(14)
-#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_DONE BIT(31)
-#define TX_DMA_FPORT_SHIFT 25
-#define TX_DMA_FPORT_MASK 0x7
-#define TX_DMA_INS_VLAN_MT7621 BIT(16)
-#define TX_DMA_INS_VLAN BIT(7)
-#define TX_DMA_INS_PPPOE BIT(12)
-#define TX_DMA_TAG BIT(15)
-#define TX_DMA_TAG_MASK BIT(15)
-#define TX_DMA_QN(_x) ((_x) << 16)
-#define TX_DMA_PN(_x) ((_x) << 24)
-#define TX_DMA_QN_MASK TX_DMA_QN(0x7)
-#define TX_DMA_PN_MASK TX_DMA_PN(0x7)
-#define TX_DMA_UDF BIT(20)
-#define TX_DMA_CHKSUM (0x7 << 29)
-#define TX_DMA_TSO BIT(28)
-#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1))
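
Putting a few of these bits together, filling one segment of a QDMA descriptor looks roughly like this (a sketch mirroring the writes in mtk_qdma_tx_map() earlier; the 1500 byte length is just an example):

        /* single-segment packet: length in PLEN0, mark it as the last one */
        WRITE_ONCE(txd->txd3, QDMA_TX_SWC | TX_DMA_PLEN0(1500) | TX_DMA_LS0);
        /* mtk_qdma_tx_poll() treats a set QDMA_TX_OWNER_CPU bit as "done" */
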
-
-/* frame engine counters */
-#define MTK_PPE_AC_BCNT0 (MTK_CMTABLE_OFFSET + 0x00)
-#define MTK_GDMA1_TX_GBCNT (MTK_CMTABLE_OFFSET + 0x300)
-#define MTK_GDMA2_TX_GBCNT (MTK_GDMA1_TX_GBCNT + 0x40)
-
-/* phy device flags */
-#define MTK_PHY_FLAG_PORT BIT(0)
-#define MTK_PHY_FLAG_ATTACH BIT(1)
-
-struct mtk_tx_dma {
- unsigned int txd1;
- unsigned int txd2;
- unsigned int txd3;
- unsigned int txd4;
-} __packed __aligned(4);
-
-struct mtk_eth;
-struct mtk_mac;
-
-/* manage the attached phys */
-struct mtk_phy {
- spinlock_t lock;
-
- struct phy_device *phy[8];
- struct device_node *phy_node[8];
- const __be32 *phy_fixed[8];
- int duplex[8];
- int speed[8];
- int tx_fc[8];
- int rx_fc[8];
- int (*connect)(struct mtk_mac *mac);
- void (*disconnect)(struct mtk_mac *mac);
- void (*start)(struct mtk_mac *mac);
- void (*stop)(struct mtk_mac *mac);
-};
-
-/* struct mtk_soc_data - the structure that holds the SoC specific data
- * @reg_table: Some of the legacy registers changed their location
- * over time. Their offsets are stored in this table
- *
- * @init_data: Some features depend on the silicon revision. This
- * callback allows runtime modification of the content of
- * this struct
- * @reset_fe: This callback is used to trigger the reset of the frame
- * engine
- * @set_mac: This callback is used to set the unicast mac address
- * filter
- * @fwd_config: This callback is used to set up the forward config
- * register of the MAC
- * @switch_init: This callback is used to bring up the switch core
- * @port_init: Some SoCs have ports that can be routed to a switch port
- * or an external PHY. This callback is used to set up these
- * ports.
- * @has_carrier: This callback allows the driver to check whether a cable
- * is attached.
- * @mdio_init: This callback is used to set up the MDIO bus if one is
- * present
- * @mdio_cleanup: This callback is used to clean up the MDIO state.
- * @mdio_write: This callback is used to write data to the MDIO bus.
- * @mdio_read: This callback is used to read data from the MDIO bus.
- * @mdio_adjust_link: This callback is used to apply the PHY settings.
- * @piac_offset: the PIAC register has a different base offset
- * @hw_features: feature set depends on the SoC type
- * @dma_ring_size: allow GBit SoCs to set bigger rings than FE SoCs
- * @napi_weight: allow GBit SoCs to set bigger napi weight than FE SoCs
- * @dma_type: whether the SoC uses PDMA, QDMA or a mix of the two
- * @pdma_glo_cfg: the default DMA configuration
- * @rx_int: the RX interrupt bits used by the SoC
- * @tx_int: the TX interrupt bits used by the SoC
- * @status_int: the Status interrupt bits used by the SoC
- * @checksum_bit: the bits used to turn on HW checksumming
- * @txd4: default value of the TXD4 descriptor
- * @mac_count: the number of MACs that the SoC has
- * @new_stats: there is an old and a new way to read the hardware stats
- * registers
- * @jumbo_frame: does the SoC support jumbo frames?
- * @rx_2b_offset: tell the rx dma to offset the data by 2 bytes
- * @rx_sg_dma: scatter gather support
- * @padding_64b: enable 64 bit padding
- * @padding_bug: rt2880 has a padding bug
- * @has_switch: does the SoC have a built-in switch
- *
- * Although all of the supported SoCs share the same basic functionality, there
- * are several SoC specific functions and features that we need to support. This
- * struct holds the SoC specific data so that the common core can figure out
- * how to set up and use these differences.
- */
-struct mtk_soc_data {
- const u16 *reg_table;
-
- void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
- void (*reset_fe)(struct mtk_eth *eth);
- void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
- int (*fwd_config)(struct mtk_eth *eth);
- int (*switch_init)(struct mtk_eth *eth);
- void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *port);
- int (*has_carrier)(struct mtk_eth *eth);
- int (*mdio_init)(struct mtk_eth *eth);
- void (*mdio_cleanup)(struct mtk_eth *eth);
- int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
- u16 val);
- int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
- void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
- u32 piac_offset;
- netdev_features_t hw_features;
- u32 dma_ring_size;
- u32 napi_weight;
- u32 dma_type;
- u32 pdma_glo_cfg;
- u32 rx_int;
- u32 tx_int;
- u32 status_int;
- u32 checksum_bit;
- u32 txd4;
- u32 mac_count;
-
- u32 new_stats:1;
- u32 jumbo_frame:1;
- u32 rx_2b_offset:1;
- u32 rx_sg_dma:1;
- u32 padding_64b:1;
- u32 padding_bug:1;
- u32 has_switch:1;
-};
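
The common core only ever reaches SoC specifics through these callbacks; for example, the open/init paths earlier in mtk_eth_soc.c do:

        mac->hw->soc->set_mac(mac, dev->dev_addr);
        if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
                netif_carrier_on(dev);
        eth->soc->fwd_config(eth);
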
-
-#define MTK_STAT_OFFSET 0x40
-
-/* struct mtk_hw_stats - the structure that holds the traffic statistics.
- * @stats_lock: make sure that stats operations are atomic
- * @reg_offset: the status register offset of the SoC
- * @syncp: synchronization point for the 64 bit counters
- *
- * All of the supported SoCs have hardware counters for traffic statistics.
- * Whenever the status IRQ triggers we can read the latest stats from these
- * counters and store them in this struct.
- */
-struct mtk_hw_stats {
- spinlock_t stats_lock;
- u32 reg_offset;
- struct u64_stats_sync syncp;
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_skip;
- u64 tx_collisions;
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_overflow;
- u64 rx_fcs_errors;
- u64 rx_short_errors;
- u64 rx_long_errors;
- u64 rx_checksum_errors;
- u64 rx_flow_control_packets;
-};
-
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
-enum mtk_tx_flags {
- MTK_TX_FLAGS_SINGLE0 = 0x01,
- MTK_TX_FLAGS_PAGE0 = 0x02,
- MTK_TX_FLAGS_PAGE1 = 0x04,
-};
-
-/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
- * by the TX descriptors
- * @skb: The SKB pointer of the packet being sent
- * @dma_addr0: The base addr of the first segment
- * @dma_len0: The length of the first segment
- * @dma_addr1: The base addr of the second segment
- * @dma_len1: The length of the second segment
- */
-struct mtk_tx_buf {
- struct sk_buff *skb;
- u32 flags;
- DEFINE_DMA_UNMAP_ADDR(dma_addr0);
- DEFINE_DMA_UNMAP_LEN(dma_len0);
- DEFINE_DMA_UNMAP_ADDR(dma_addr1);
- DEFINE_DMA_UNMAP_LEN(dma_len1);
-};
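
The flags tell the cleanup path which DMA API call originally produced the mapping. The driver's real helper (mtk_txd_unmap(), defined outside this hunk) presumably replays that bookkeeping along these lines (sketch only):

        if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0)
                dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma_addr0),
                                 dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE);
        else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0)
                dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma_addr0),
                               dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE);
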
-
-/* struct mtk_tx_ring - This struct holds info describing a TX ring
- * @tx_dma: The descriptor ring
- * @tx_buf: The memory pointed at by the ring
- * @tx_phys: The physical addr of tx_dma
- * @tx_next_free: Pointer to the next free descriptor
- * @tx_last_free: Pointer to the last free descriptor
- * @tx_thresh: The minimum number of free descriptors before the queue is woken
- * @tx_map: Callback to map a new packet into the ring
- * @tx_poll: Callback for the housekeeping function
- * @tx_clean: Callback for the cleanup function
- * @tx_ring_size: How many descriptors are in the ring
- * @tx_free_idx: The index of the next free descriptor
- * @tx_next_idx: QDMA uses a linked list. This element points to the next
- * free descriptor in the list
- * @tx_free_count: QDMA uses a linked list. Track how many free descriptors
- * are present
- */
-struct mtk_tx_ring {
- struct mtk_tx_dma *tx_dma;
- struct mtk_tx_buf *tx_buf;
- dma_addr_t tx_phys;
- struct mtk_tx_dma *tx_next_free;
- struct mtk_tx_dma *tx_last_free;
- u16 tx_thresh;
- int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
- struct mtk_tx_ring *ring, bool gso);
- int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
- void (*tx_clean)(struct mtk_eth *eth);
-
- /* PDMA only */
- u16 tx_ring_size;
- u16 tx_free_idx;
-
- /* QDMA only */
- u16 tx_next_idx;
- atomic_t tx_free_count;
-};
-
-/* struct mtk_rx_ring - This struct holds info describing a RX ring
- * @rx_dma: The descriptor ring
- * @rx_data: The memory pointed at by the ring
- * @rx_phys: The physical addr of rx_dma
- * @rx_ring_size: How many descriptors are in the ring
- * @rx_buf_size: The size of each packet buffer
- * @rx_calc_idx: The current head of ring
- */
-struct mtk_rx_ring {
- struct mtk_rx_dma *rx_dma;
- u8 **rx_data;
- dma_addr_t rx_phys;
- u16 rx_ring_size;
- u16 frag_size;
- u16 rx_buf_size;
- u16 rx_calc_idx;
-};
-
-/* currently no SoC has more than 2 macs */
-#define MTK_MAX_DEVS 2
-
-/* struct mtk_eth - This is the main data structure for holding the state
- * of the driver
- * @dev: The device pointer
- * @base: The mapped register i/o base
- * @page_lock: Make sure that register operations are atomic
- * @soc: pointer to our SoC specific data
- * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
- * dummy for NAPI to work
- * @netdev: The netdev instances
- * @mac: Each netdev is linked to a physical MAC
- * @switch_np: The phandle for the switch
- * @irq: The IRQ that we are using
- * @msg_enable: Ethtool msg level
- * @sysclk: The sysclk rate - needed for calibration
- * @ethsys: The register map pointing at the range used to setup
- * MII modes
- * @dma_refcnt: track how many netdevs are using the DMA engine
- * @tx_ring: Pointer to the memory holding info about the TX ring
- * @rx_ring: Pointer to the memory holding info about the RX ring
- * @rx_napi: The NAPI struct
- * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
- * @scratch_head: The scratch memory that scratch_ring points to.
- * @phy: Info about the attached PHYs
- * @mii_bus: If there is a bus we need to create an instance for it
- * @link: Track if the ports have a physical link
- * @sw_priv: Pointer to the switch's private data
- * @vlan_map: RX VID tracking
- */
-
-struct mtk_eth {
- struct device *dev;
- void __iomem *base;
- spinlock_t page_lock;
- struct mtk_soc_data *soc;
- struct net_device dummy_dev;
- struct net_device *netdev[MTK_MAX_DEVS];
- struct mtk_mac *mac[MTK_MAX_DEVS];
- struct device_node *switch_np;
- int irq;
- u32 msg_enable;
- unsigned long sysclk;
- struct regmap *ethsys;
- atomic_t dma_refcnt;
- struct mtk_tx_ring tx_ring;
- struct mtk_rx_ring rx_ring[2];
- struct napi_struct rx_napi;
- struct mtk_tx_dma *scratch_ring;
- void *scratch_head;
- struct mtk_phy *phy;
- struct mii_bus *mii_bus;
- int link[8];
- void *sw_priv;
- unsigned long vlan_map;
-};
-
-/* struct mtk_mac - the structure that holds the info about the MACs of the
- * SoC
- * @id: The number of the MAC
- * @of_node: Our devicetree node
- * @hw: Backpointer to our main data structure
- * @hw_stats: Packet statistics counter
- * @phy_dev: The attached PHY if available
- * @phy_flags: The PHYs flags
- * @pending_work: The work used to reset the dma ring
- */
-struct mtk_mac {
- int id;
- struct device_node *of_node;
- struct mtk_eth *hw;
- struct mtk_hw_stats *hw_stats;
- struct phy_device *phy_dev;
- u32 phy_flags;
- struct work_struct pending_work;
-};
-
-/* the struct describing the SoC. these are declared in the soc_xyz.c files */
-extern const struct of_device_id of_mtk_match[];
-
-/* read the hardware status register */
-void mtk_stats_update_mac(struct mtk_mac *mac);
-
-/* frame engine reset helper */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
-
-/* register i/o wrappers */
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
-
-/* default clock calibration handler */
-int mtk_set_clock_cycle(struct mtk_eth *eth);
-
-/* default checksum setup handler */
-void mtk_csum_config(struct mtk_eth *eth);
-
-/* default forward config handler */
-void mtk_fwd_config(struct mtk_eth *eth);
-
-#endif /* MTK_ETH_H */
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644
index 5d63b5d96f6b..000000000000
--- a/drivers/staging/mt7621-eth/soc_mt7621.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/if_vlan.h>
-#include <linux/of_net.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-#define MT7620_CDMA_CSG_CFG 0x400
-#define MT7621_CDMP_IG_CTRL (MT7620_CDMA_CSG_CFG + 0x00)
-#define MT7621_CDMP_EG_CTRL (MT7620_CDMA_CSG_CFG + 0x04)
-#define MT7621_RESET_FE BIT(6)
-#define MT7621_L4_VALID BIT(24)
-
-#define MT7621_TX_DMA_UDF BIT(19)
-
-#define CDMA_ICS_EN BIT(2)
-#define CDMA_UCS_EN BIT(1)
-#define CDMA_TCS_EN BIT(0)
-
-#define GDMA_ICS_EN BIT(22)
-#define GDMA_TCS_EN BIT(21)
-#define GDMA_UCS_EN BIT(20)
-
-/* frame engine counters */
-#define MT7621_REG_MIB_OFFSET 0x2000
-#define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00)
-#define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400)
-#define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40)
-
-#define GSW_REG_GDMA1_MAC_ADRL 0x508
-#define GSW_REG_GDMA1_MAC_ADRH 0x50C
-#define GSW_REG_GDMA2_MAC_ADRL 0x1508
-#define GSW_REG_GDMA2_MAC_ADRH 0x150C
-
-#define MT7621_MTK_RST_GL 0x04
-#define MT7620_MTK_INT_STATUS2 0x08
-
-/* The MTK_INT_STATUS reg on mt7620 defines CNT_GDM1_AF at BIT(29),
- * but after testing it should be BIT(13).
- */
-#define MT7621_MTK_GDM1_AF BIT(28)
-#define MT7621_MTK_GDM2_AF BIT(29)
-
-static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
- [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
- [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
- [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
- [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
- [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
- [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
- [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
- [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
- [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
- [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
- [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
- [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
- [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
- [MTK_REG_MTK_DMA_VID_BASE] = 0,
- [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
- [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
- [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
-};
-
-static void mt7621_mtk_reset(struct mtk_eth *eth)
-{
- mtk_reset(eth, MT7621_RESET_FE);
-}
-
-static int mt7621_fwd_config(struct mtk_eth *eth)
-{
- /* Setup GMAC1 only, there is no support for GMAC2 yet */
- mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
- MT7620_GDMA1_FWD_CFG);
-
- /* Enable RX checksum */
- mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
- GDMA_TCS_EN | GDMA_UCS_EN),
- MT7620_GDMA1_FWD_CFG);
-
- /* Enable RX VLan Offloading */
- mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
-
- return 0;
-}
-
-static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mac->hw->page_lock, flags);
- if (mac->id == 0) {
- mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
- GSW_REG_GDMA1_MAC_ADRH);
- mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
- (hwaddr[4] << 8) | hwaddr[5],
- GSW_REG_GDMA1_MAC_ADRL);
- }
- if (mac->id == 1) {
- mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
- GSW_REG_GDMA2_MAC_ADRH);
- mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
- (hwaddr[4] << 8) | hwaddr[5],
- GSW_REG_GDMA2_MAC_ADRL);
- }
- spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static struct mtk_soc_data mt7621_data = {
- .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_IPV6_CSUM,
- .dma_type = MTK_PDMA,
- .dma_ring_size = 256,
- .napi_weight = 64,
- .new_stats = 1,
- .padding_64b = 1,
- .rx_2b_offset = 1,
- .rx_sg_dma = 1,
- .has_switch = 1,
- .mac_count = 2,
- .reset_fe = mt7621_mtk_reset,
- .set_mac = mt7621_set_mac,
- .fwd_config = mt7621_fwd_config,
- .switch_init = mtk_gsw_init,
- .reg_table = mt7621_reg_table,
- .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
- .rx_int = RT5350_RX_DONE_INT,
- .tx_int = RT5350_TX_DONE_INT,
- .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
- .checksum_bit = MT7621_L4_VALID,
- .has_carrier = mt7620_has_carrier,
- .mdio_read = mt7620_mdio_read,
- .mdio_write = mt7620_mdio_write,
- .mdio_adjust_link = mt7620_mdio_link_adjust,
-};
-
-const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index d33533872a16..c8fa17cfa807 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -1,6 +1,7 @@
config PCI_MT7621
tristate "MediaTek MT7621 PCI Controller"
depends on RALINK
+ depends on PCI
select PCI_DRIVERS_GENERIC
help
This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index d6248eecf123..2aee64fdaec5 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
goto no_phy;
phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ priv->phy_mode);
of_node_put(phy_node);
if (!phydev)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ce61c5670ef6..986db76705cc 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
return np;
}
-static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
+static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
+ int port)
{
+ struct device_node *np = priv->of_node;
u32 delay_value;
+ bool rx_delay;
+ bool tx_delay;
- if (!of_property_read_u32(np, "rx-delay", &delay_value))
+ /* By default, both the RX and TX delays are enabled in
+ * __cvmx_helper_rgmii_enable().
+ */
+ rx_delay = true;
+ tx_delay = true;
+
+ if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
- if (!of_property_read_u32(np, "tx-delay", &delay_value))
+ rx_delay = delay_value > 0;
+ }
+ if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+ tx_delay = delay_value > 0;
+ }
+
+ if (!rx_delay && !tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+ else if (!rx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+ else if (!tx_delay)
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
+ else
+ priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
}
static int cvm_oct_probe(struct platform_device *pdev)
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
priv->port = port;
priv->queue = cvmx_pko_get_base_queue(priv->port);
priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+ priv->phy_mode = PHY_INTERFACE_MODE_NA;
for (qos = 0; qos < 16; qos++)
skb_queue_head_init(&priv->tx_free_list[qos]);
for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
strcpy(dev->name, "eth%d");
break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
strcpy(dev->name, "spi%d");
break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
+ priv->phy_mode = PHY_INTERFACE_MODE_GMII;
+ dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+ strcpy(dev->name, "eth%d");
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
strcpy(dev->name, "eth%d");
- cvm_set_rgmii_delay(priv->of_node, interface,
+ cvm_set_rgmii_delay(priv, interface,
port_index);
break;
}
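
The ethernet.c hunk above derives the RGMII interface mode from whether the
devicetree leaves the MAC-side RX/TX delays enabled. A minimal standalone
sketch of the same decision table follows; the enum values and the
pick_rgmii_mode() helper are illustrative stand-ins for the driver's
phy_interface_t constants, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

/* rx_delay/tx_delay: true when the MAC-side delay stays enabled. */
static enum rgmii_mode pick_rgmii_mode(bool rx_delay, bool tx_delay)
{
	if (!rx_delay && !tx_delay)
		return RGMII_ID;	/* PHY must insert both delays */
	else if (!rx_delay)
		return RGMII_RXID;	/* PHY inserts the RX delay only */
	else if (!tx_delay)
		return RGMII_TXID;	/* PHY inserts the TX delay only */
	return RGMII;			/* MAC provides both delays */
}

int main(void)
{
	/* RX delay disabled, TX delay enabled -> RGMII_RXID (prints 2) */
	printf("%d\n", pick_rgmii_mode(false, true));
	return 0;
}
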
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 4a07e7f43d12..be570d33685a 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -12,7 +12,7 @@
#define OCTEON_ETHERNET_H
#include <linux/of.h>
-
+#include <linux/phy.h>
#include <asm/octeon/cvmx-helper-board.h>
/**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
* cvmx_helper_interface_mode_t
*/
int imode;
+ /* PHY mode */
+ phy_interface_t phy_mode;
/* List of outstanding tx buffers per queue */
struct sk_buff_head tx_free_list[16];
unsigned int last_speed;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 80b8d4153414..a54286498a47 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
{
unsigned char lob;
int ret, i;
- struct dcon_gpio *pin = &gpios_asis[0];
+ const struct dcon_gpio *pin = &gpios_asis[0];
for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 1723a47a96b4..952f2ab51347 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
- rtw_alloc_hwxmits(padapter);
+ res = rtw_alloc_hwxmits(padapter);
+ if (res == _FAIL)
+ goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ exit:
return res;
}
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
sizeof(struct hw_xmit), GFP_KERNEL);
+ if (!pxmitpriv->hwxmits)
+ return _FAIL;
hwxmits = pxmitpriv->hwxmits;
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+ return _SUCCESS;
}
void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 788f59c74ea1..ba7e15fbde72 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 1920d02f7c9f..8c36acedf507 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
- u32 val;
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- if (pcmd->rsp && pcmd->rspsz > 0)
- memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
+ r8712_free_cmd_obj(pcmd);
return H2C_SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 92fb77666d44..1ef86b8c592f 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
static struct _cmd_callback cmd_callback[] = {
{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
{GEN_CMD_CODE(_Write_MACREG), NULL},
- {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Read_BBREG), NULL},
{GEN_CMD_CODE(_Write_BBREG), NULL},
{GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
{GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 094d61bcb469..b87f13a0b563 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
}
}
- rtw_alloc_hwxmits(padapter);
+ res = rtw_alloc_hwxmits(padapter);
+ if (res == _FAIL)
+ goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ exit:
return res;
}
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
{
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
- if (pxmitpriv->hwxmits == NULL) {
- DBG_871X("alloc hwxmits fail!...\n");
- return;
- }
+ if (!pxmitpriv->hwxmits)
+ return _FAIL;
hwxmits = pxmitpriv->hwxmits;
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
}
-
+ return _SUCCESS;
}
void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index 1b38b9182b31..37f42b2f22f1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
index 9930ed954abb..4cc77b2016e1 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
rtlpriv->phydm.internal =
kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
+ if (!rtlpriv->phydm.internal)
+ return 0;
_rtl_phydm_init_com_info(rtlpriv, ic, params);
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index f061dd1382aa..cf6b7a80b753 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1_rsvd_page_loc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
totalpacketlen);
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index edff6ce85655..9d85a3a1af4c 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
return -EINVAL;
spin_lock_irqsave(&speakup_info.spinlock, flags);
+ synth_soft.alive = 1;
while (1) {
prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
- if (!unicode)
- synth_buffer_skip_nonlatin1();
- if (!synth_buffer_empty() || speakup_info.flushing)
- break;
+ if (synth_current() == &synth_soft) {
+ if (!unicode)
+ synth_buffer_skip_nonlatin1();
+ if (!synth_buffer_empty() || speakup_info.flushing)
+ break;
+ }
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (fp->f_flags & O_NONBLOCK) {
finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
/* Keep 3 bytes available for a 16bit UTF-8-encoded character */
while (chars_sent <= count - bytes_per_ch) {
+ if (synth_current() != &synth_soft)
+ break;
if (speakup_info.flushing) {
speakup_info.flushing = 0;
ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
poll_wait(fp, &speakup_event, wait);
spin_lock_irqsave(&speakup_info.spinlock, flags);
- if (!synth_buffer_empty() || speakup_info.flushing)
+ if (synth_current() == &synth_soft &&
+ (!synth_buffer_empty() || speakup_info.flushing))
ret = EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return ret;
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index c8e688878fc7..ac6a74883af4 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
int synth_release_region(unsigned long start, unsigned long n);
int synth_add(struct spk_synth *in_synth);
void synth_remove(struct spk_synth *in_synth);
+struct spk_synth *synth_current(void);
extern struct speakup_info_t speakup_info;
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 25f259ee4ffc..3568bfb89912 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
}
EXPORT_SYMBOL_GPL(synth_remove);
+struct spk_synth *synth_current(void)
+{
+ return synth;
+}
+EXPORT_SYMBOL_GPL(synth_current);
+
short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 804daf83be35..064d0db4c51e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
struct device_node *fw_node;
const struct of_device_id *of_id;
struct vchiq_drvdata *drvdata;
+ struct device *vchiq_dev;
int err;
of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
goto failed_platform_init;
}
- if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid,
- NULL, "vchiq")))
+ vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
+ "vchiq");
+ if (IS_ERR(vchiq_dev)) {
+ err = PTR_ERR(vchiq_dev);
goto failed_device_create;
+ }
vchiq_debugfs_init();
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b370985b58a1..c6bb4aaf9bd0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
return;
}
- MACvIntDisable(priv->PortOffset);
-
spin_lock_irqsave(&priv->lock, flags);
/* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
if (priv->vif)
vnt_interrupt_process(priv);
+
+ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static irqreturn_t vnt_interrupt(int irq, void *arg)
{
struct vnt_private *priv = arg;
- if (priv->vif)
- schedule_work(&priv->interrupt_work);
+ schedule_work(&priv->interrupt_work);
+
+ MACvIntDisable(priv->PortOffset);
return IRQ_HANDLED;
}
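
The vt6655 change above splits interrupt handling so the hard IRQ handler only
masks the device and schedules a work item, and the work handler unmasks the
device again once processing is finished; the mask/unmask pair then brackets
the deferred work even before priv->vif is set. A schematic, non-functional
sketch of that shape is below; struct foo_priv and the foo_* helpers are
invented placeholders for MACvIntDisable/MACvIntEnable and the processing
routine, and INIT_WORK(&priv->interrupt_work, foo_interrupt_work) at probe
time is assumed.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct work_struct interrupt_work;
	void __iomem *regs;
};

/* Device-specific stubs standing in for the real register accessors. */
static void foo_mask_irqs(void __iomem *regs) { }
static void foo_unmask_irqs(void __iomem *regs) { }
static void foo_process_events(struct foo_priv *priv) { }

static void foo_interrupt_work(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv,
					     interrupt_work);

	foo_process_events(priv);	/* process context, may sleep */
	foo_unmask_irqs(priv->regs);	/* re-arm only after processing */
}

static irqreturn_t foo_interrupt(int irq, void *arg)
{
	struct foo_priv *priv = arg;

	foo_mask_irqs(priv->regs);		/* silence the device... */
	schedule_work(&priv->interrupt_work);	/* ...and defer the work */
	return IRQ_HANDLED;
}
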
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index db5df3d54818..3bdd56a1021b 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
struct clk *clk;
};
-static inline bool ar933x_uart_console_enabled(void)
-{
- return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
int offset)
{
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
static struct ar933x_uart_port *
ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
.index = -1,
.data = &ar933x_uart_driver,
};
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
- if (!ar933x_uart_console_enabled())
- return;
-
- ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
static struct uart_driver ar933x_uart_driver = {
.owner = THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
- ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+ ar933x_console_ports[up->port.line] = up;
+#endif
ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
{
int ret;
- if (ar933x_uart_console_enabled())
- ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+ ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
ret = uart_register_driver(&ar933x_uart_driver);
if (ret)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 05147fe24343..0b4f36905321 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,6 +166,8 @@ struct atmel_uart_port {
unsigned int pending_status;
spinlock_t lock_suspended;
+ bool hd_start_rx; /* can start RX during half-duplex operation */
+
/* ISO7816 */
unsigned int fidi_min;
unsigned int fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
__raw_writeb(value, port->membase + ATMEL_US_THR);
}
+static inline int atmel_uart_is_half_duplex(struct uart_port *port)
+{
+ return ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
+ (port->iso7816.flags & SER_ISO7816_ENABLED);
+}
+
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
- if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED)
+ if (atmel_uart_is_half_duplex(port))
atmel_start_rx(port);
+
}
/*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
return;
if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
- if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED)
+ if (atmel_uart_is_half_duplex(port))
atmel_stop_rx(port);
if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
*/
if (!uart_circ_empty(xmit))
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
- else if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED) {
- /* DMA done, stop TX, start RX for RS485 */
- atmel_start_rx(port);
+ else if (atmel_uart_is_half_duplex(port)) {
+ /*
+ * DMA done, re-enable TXEMPTY and signal that we can stop
+ * TX and start RX for RS485
+ */
+ atmel_port->hd_start_rx = true;
+ atmel_uart_writel(port, ATMEL_US_IER,
+ atmel_port->tx_done_mask);
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
sg_dma_len(&atmel_port->sg_rx)/2,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(port->dev, "Preparing DMA cyclic failed\n");
+ goto chan_err;
+ }
desc->callback = atmel_complete_rx_dma;
desc->callback_param = port;
atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & atmel_port->tx_done_mask) {
- /* Either PDC or interrupt transmission */
atmel_uart_writel(port, ATMEL_US_IDR,
atmel_port->tx_done_mask);
+
+ /* Start RX if flag was set and FIFO is empty */
+ if (atmel_port->hd_start_rx) {
+ if (!(atmel_uart_readl(port, ATMEL_US_CSR)
+ & ATMEL_US_TXEMPTY))
+ dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
+
+ atmel_port->hd_start_rx = false;
+ atmel_start_rx(port);
+ return;
+ }
+
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
}
}
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
atmel_uart_writel(port, ATMEL_US_IER,
atmel_port->tx_done_mask);
} else {
- if (((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
- port->iso7816.flags & SER_ISO7816_ENABLED) {
+ if (atmel_uart_is_half_duplex(port)) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 6fb312e7af71..bfe5e9e034ec 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
char *cptr = config;
struct console *cons;
- if (!strlen(config) || isspace(config[0]))
+ if (!strlen(config) || isspace(config[0])) {
+ err = 0;
goto noconfig;
+ }
kgdboc_io_ops.is_console = 0;
kgdb_tty_driver = NULL;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index f5bdde405627..450ba6d7996c 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
if (spi->dev.of_node) {
const struct of_device_id *of_id =
of_match_device(max310x_dt_ids, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
devtype = (struct max310x_devtype *)of_id->data;
} else {
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 231f751d1ef4..7e7b1559fa36 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (!match)
+ return -ENODEV;
+
/* Assume that all UART ports have a DT alias or none has */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 27235a526cce..4c188f4079b3 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.mapbase = r->start;
s->port.membase = ioremap(r->start, resource_size(r));
+ if (!s->port.membase) {
+ ret = -ENOMEM;
+ goto out_disable_clks;
+ }
s->port.ops = &mxs_auart_ops;
s->port.iotype = UPIO_MEM;
s->port.fifosize = MXS_AUART_FIFO_SIZE;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 3bcec1c20219..35e5f9c5d5be 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
- int baud;
+ int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 635178cf3eed..09a183dfc526 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
- return ret;
+ goto err_i2c;
}
#endif
@@ -1515,10 +1515,18 @@ static int __init sc16is7xx_init(void)
ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx spi --> %d\n", ret);
- return ret;
+ goto err_spi;
}
#endif
return ret;
+
+err_spi:
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+ i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+#endif
+err_i2c:
+ uart_unregister_driver(&sc16is7xx_uart);
+ return ret;
}
module_init(sc16is7xx_init);
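
The sc16is7xx fix above is the usual init-time unwind: once a later
registration fails, everything that already succeeded has to be torn down in
reverse order (err_spi undoes the I2C driver, err_i2c undoes the UART driver),
otherwise a failed modprobe leaves drivers registered. A toy standalone
version of the same goto-unwind shape, with made-up register/unregister pairs:

#include <stdio.h>

static int register_a(void) { puts("A registered"); return 0; }
static void unregister_a(void) { puts("A unregistered"); }
static int register_b(void) { puts("B registered"); return 0; }
static void unregister_b(void) { puts("B unregistered"); }
static int register_c(void) { puts("C failed"); return -1; }

static int init_all(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_b;

	ret = register_c();
	if (ret)
		goto err_c;

	return 0;

err_c:
	unregister_b();		/* undo what succeeded... */
err_b:
	unregister_a();		/* ...in reverse order */
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
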
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 060fcd42b6d5..2d1c626312cd 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit)) {
+ if (uart_circ_empty(xmit))
sci_stop_tx(port);
- } else {
- ctrl = serial_port_in(port, SCSCR);
-
- if (port->type != PORT_SCI) {
- serial_port_in(port, SCxSR); /* Dummy read */
- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
- }
- ctrl |= SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
- }
}
/* On SH3, SCIF may read end-of-break as a space->mark char */
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 044c3cbdcfa4..a9e12b3bc31d 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
if (tty && C_HUPCL(tty))
tty_port_lower_dtr_rts(port);
- if (port->ops->shutdown)
+ if (port->ops && port->ops->shutdown)
port->ops->shutdown(port);
}
out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
*/
int tty_port_carrier_raised(struct tty_port *port)
{
- if (port->ops->carrier_raised == NULL)
+ if (!port->ops || !port->ops->carrier_raised)
return 1;
return port->ops->carrier_raised(port);
}
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
*/
void tty_port_raise_dtr_rts(struct tty_port *port)
{
- if (port->ops->dtr_rts)
+ if (port->ops && port->ops->dtr_rts)
port->ops->dtr_rts(port, 1);
}
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
*/
void tty_port_lower_dtr_rts(struct tty_port *port)
{
- if (port->ops->dtr_rts)
+ if (port->ops && port->ops->dtr_rts)
port->ops->dtr_rts(port, 0);
}
EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
if (!tty_port_initialized(port)) {
clear_bit(TTY_IO_ERROR, &tty->flags);
- if (port->ops->activate) {
+ if (port->ops && port->ops->activate) {
int retval = port->ops->activate(port, tty);
if (retval) {
mutex_unlock(&port->mutex);
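
The tty_port.c hunks above all apply the same guard: the ops table itself is
optional, so each call site checks the table pointer before the individual
callback and picks a sensible default when either is missing. A small
standalone illustration of the pattern; struct widget and its ops are invented
names:

#include <stddef.h>
#include <stdio.h>

struct widget_ops {
	void (*shutdown)(void);
	int (*carrier_raised)(void);
};

struct widget {
	const struct widget_ops *ops;	/* may legitimately be NULL */
};

static int widget_carrier_raised(struct widget *w)
{
	/* Optional callback on an optional table: default to "raised". */
	if (!w->ops || !w->ops->carrier_raised)
		return 1;
	return w->ops->carrier_raised();
}

int main(void)
{
	struct widget bare = { .ops = NULL };

	printf("%d\n", widget_carrier_raised(&bare));	/* prints 1, no crash */
	return 0;
}
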
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 739f8960811a..ec666eb4b7b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
clear_bit(EVENT_RX_STALL, &acm->flags);
}
- if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+ if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
tty_port_tty_wakeup(&acm->port);
- clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
- }
}
/*
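
The cdc-acm change above folds a separate test_bit()/clear_bit() pair into one
test_and_clear_bit(), so a wakeup flagged between the test and the clear can
no longer be wiped out unseen. The same idea expressed with portable C11
atomics; the bit name mirrors the driver's flag but the helper is illustrative:

#include <stdatomic.h>
#include <stdio.h>

#define EVENT_TTY_WAKEUP (1u << 0)

static atomic_uint flags;

/* Atomically clear the bit and report whether it had been set. */
static int test_and_clear(atomic_uint *word, unsigned int bit)
{
	unsigned int old = atomic_fetch_and(word, ~bit);

	return (old & bit) != 0;
}

int main(void)
{
	atomic_fetch_or(&flags, EVENT_TTY_WAKEUP);	/* event raised */

	if (test_and_clear(&flags, EVENT_TTY_WAKEUP))
		puts("wakeup handled exactly once");
	if (!test_and_clear(&flags, EVENT_TTY_WAKEUP))
		puts("no spurious second wakeup");
	return 0;
}
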
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 48277bbc15e4..73c8e6591746 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
do {
controller = of_find_node_with_property(controller, "phys");
+ if (!of_device_is_available(controller))
+ continue;
index = 0;
do {
if (arg0 == -1) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3189181bb628..975d7c1288e3 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2742,6 +2742,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
PHY_MODE_USB_HOST_SS);
if (retval)
+ retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
+ PHY_MODE_USB_HOST);
+ if (retval)
goto err_usb_phy_roothub_power_on;
retval = usb_phy_roothub_power_on(hcd->phy_roothub);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index fdc6e4e403e8..8cced3609e24 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
+#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
(kernel_ulong_t) &dwc3_pci_mrfld_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 75b113a5b25c..f3816a5c861e 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -391,20 +391,20 @@ try_again:
req->complete = f_hidg_req_complete;
req->context = hidg;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
- goto release_write_pending_unlocked;
+ goto release_write_pending;
} else {
status = count;
}
- spin_unlock_irqrestore(&hidg->write_spinlock, flags);
return status;
release_write_pending:
spin_lock_irqsave(&hidg->write_spinlock, flags);
-release_write_pending_unlocked:
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index b77f3126580e..c2011cd7df8c 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
break;
}
if (&req->req != _req) {
+ ep->stopped = stopped;
spin_unlock_irqrestore(&ep->dev->lock, flags);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f63f82450bf4..898339e5df10 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
(void) readl(&ep->dev->pci->pcimstctl);
writel(BIT(DMA_START), &dma->dmastat);
-
- if (!ep->is_in)
- stop_out_naking(ep);
}
static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
writel(BIT(DMA_START), &dma->dmastat);
return;
}
+ stop_out_naking(ep);
}
tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
break;
}
if (&req->req != _req) {
+ ep->stopped = stopped;
spin_unlock_irqrestore(&ep->dev->lock, flags);
- dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
- __func__);
+ ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
return -EINVAL;
}
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 934584f0a20a..6343fbacd244 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
printk(KERN_INFO "driver %s\n", hcd_name);
workqueue = create_singlethread_workqueue("u132");
retval = platform_driver_register(&u132_platform_driver);
+ if (retval)
+ destroy_workqueue(workqueue);
+
return retval;
}
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index c78be578abb0..d932cc31711e 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
return -1;
writel(0, &dbc->regs->control);
- xhci_dbc_mem_cleanup(xhci);
dbc->state = DS_DISABLED;
return 0;
@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
ret = xhci_do_dbc_stop(xhci);
spin_unlock_irqrestore(&dbc->lock, flags);
- if (!ret)
+ if (!ret) {
+ xhci_dbc_mem_cleanup(xhci);
pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+ }
}
static void
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e2eece693655..96a740543183 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
port_index = max_ports;
while (port_index--) {
u32 t1, t2;
-
+ int retries = 10;
+retry:
t1 = readl(ports[port_index]->addr);
t2 = xhci_port_state_to_neutral(t1);
portsc_buf[port_index] = 0;
- /* Bail out if a USB3 port has a new device in link training */
- if ((hcd->speed >= HCD_USB3) &&
+ /*
+ * Give a USB3 port in link training time to finish, but don't
+ * prevent suspend as port might be stuck
+ */
+ if ((hcd->speed >= HCD_USB3) && retries-- &&
(t1 & PORT_PLS_MASK) == XDEV_POLLING) {
- bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
- return -EBUSY;
+ msleep(XHCI_PORT_POLLING_LFPS_TIME);
+ spin_lock_irqsave(&xhci->lock, flags);
+ xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
+ port_index);
+ goto retry;
}
-
/* suspend ports in U0, or bail out for new connect changes */
if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
if ((t1 & PORT_CSC) && wake_enabled) {
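
The xhci-hub.c hunk above replaces the hard -EBUSY bail-out with a bounded
wait: re-read the port up to 10 times, sleeping 36ms (a tenth of the 360ms
tPollingLFPSTimeout) between reads, and then carry on with suspend even if the
port never leaves the Polling state. A generic standalone sketch of that kind
of bounded poll loop; port_is_polling() and its fake countdown are placeholders:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL_MS 36	/* 360ms budget spread over 10 attempts */

static bool port_is_polling(void)
{
	static int remaining = 3;	/* pretend training finishes after 3 reads */

	return remaining-- > 0;
}

int main(void)
{
	int retries = 10;

	while (port_is_polling() && retries--) {
		printf("port still training, %d tries left\n", retries);
		usleep(POLL_INTERVAL_MS * 1000);
	}

	/* Proceed either way: a stuck port must not block suspend forever. */
	puts("continuing with suspend");
	return 0;
}
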
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index a6e463715779..671bce18782c 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
if (!xhci_rcar_wait_for_pll_active(hcd))
return -ETIMEDOUT;
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
return xhci_rcar_download_firmware(hcd);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 40fa25c4d041..9215a28dad40 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
}
- if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
- DEV_SUPERSPEED_ANY(portsc)) {
+ if ((portsc & PORT_PLC) &&
+ DEV_SUPERSPEED_ANY(portsc) &&
+ ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
+ (portsc & PORT_PLS_MASK) == XDEV_U1 ||
+ (portsc & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
- /* We've just brought the device into U0 through either the
+ /* We've just brought the device into U0/1/2 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
* initiated remote wake, don't pass up the link state change,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 652dc36e3012..9334cdee382a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -452,6 +452,14 @@ struct xhci_op_regs {
*/
#define XHCI_DEFAULT_BESL 4
+/*
+ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check the status 10 times with a 36ms sleep in places where we need
+ * to wait for polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME 36
+
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d72b7d1d383..04684849d683 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
*/
hub->port_swap = USB251XB_DEF_PORT_SWAP;
of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
- if ((port >= 0) && (port <= data->port_cnt))
+ if (port <= data->port_cnt)
hub->port_swap |= BIT(port);
}
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
dev);
int err;
- if (np) {
+ if (np && of_id) {
err = usb251xb_get_ofdata(hub,
(struct usb251xb_data *)of_id->data);
if (err) {
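
The usb251xb hunk above drops a "port >= 0" test because the loop variable
comes from of_property_for_each_u32() and is unsigned, so the comparison is
always true and only draws -Wtype-limits style warnings. A two-line standalone
demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int port = 0;

	/* Always true for an unsigned value; compilers warn about it. */
	if (port >= 0)
		puts("tautology: the check adds nothing");
	return 0;
}
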
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index bcc23486c4ed..928c2cd6fc00 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -6,6 +6,7 @@ config USB_MTU3
tristate "MediaTek USB3 Dual Role controller"
depends on USB || USB_GADGET
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on EXTCON || !EXTCON
select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
help
Say Y or M here if your system runs on MediaTek SoCs with
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fffe23ab0189..979bef9bfb6b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8f5b17471759..1d8461ae2c34 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index b863bedb55a1..5755f0df0025 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -567,7 +567,9 @@
/*
* NovaTech product ids (FTDI_VID)
*/
-#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
/*
* Synapse Wireless product ids (FTDI_VID)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index fc52ac75fbf6..18110225d506 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
if (!urbtrack)
return -ENOMEM;
- kref_get(&mos_parport->ref_count);
- urbtrack->mos_parport = mos_parport;
urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urbtrack->urb) {
kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
usb_sndctrlpipe(usbdev, 0),
(unsigned char *)urbtrack->setup,
NULL, 0, async_complete, urbtrack);
+ kref_get(&mos_parport->ref_count);
+ urbtrack->mos_parport = mos_parport;
kref_init(&urbtrack->ref_count);
INIT_LIST_HEAD(&urbtrack->urblist_entry);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 11b21d9410f3..83869065b802 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
+#define QUECTEL_PRODUCT_EM12 0x0512
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
- { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 0f62db091d8d..a2233d72ae7c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -37,6 +37,7 @@
S(SRC_ATTACHED), \
S(SRC_STARTUP), \
S(SRC_SEND_CAPABILITIES), \
+ S(SRC_SEND_CAPABILITIES_TIMEOUT), \
S(SRC_NEGOTIATE_CAPABILITIES), \
S(SRC_TRANSITION_SUPPLY), \
S(SRC_READY), \
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
/* port->hard_reset_count = 0; */
port->caps_count = 0;
port->pd_capable = true;
- tcpm_set_state_cond(port, hard_reset_state(port),
+ tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
PD_T_SEND_SOURCE_CAP);
}
break;
+ case SRC_SEND_CAPABILITIES_TIMEOUT:
+ /*
+ * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
+ *
+ * PD 2.0 sinks are supposed to accept src-capabilities with a
+ * 3.0 header and simply ignore any src PDOs which the sink does
+ * not understand, such as PPS, but some 2.0 sinks instead ignore
+ * the entire PD_DATA_SOURCE_CAP message, causing contract
+ * negotiation to fail.
+ *
+ * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
+ * sending src-capabilities with a lower PD revision to
+ * make these broken sinks work.
+ */
+ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
+ tcpm_set_state(port, HARD_RESET_SEND, 0);
+ } else if (port->negotiated_rev > PD_REV20) {
+ port->negotiated_rev--;
+ port->hard_reset_count = 0;
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ } else {
+ tcpm_set_state(port, hard_reset_state(port), 0);
+ }
+ break;
case SRC_NEGOTIATE_CAPABILITIES:
ret = tcpm_pd_check_request(port);
if (ret < 0) {
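
The new SRC_SEND_CAPABILITIES_TIMEOUT state above encodes a fallback ladder:
first use up the allowed hard resets, then step the negotiated PD revision
down toward 2.0 and resend source capabilities, and only then fall back to the
ordinary hard-reset handling. A compact standalone model of that decision; the
constants are illustrative and, unlike the driver, the reset counter is bumped
inside the helper just to keep the example self-contained:

#include <stdio.h>

#define PD_N_HARD_RESET_COUNT 2
#define PD_REV20 0
#define PD_REV30 1

enum next_step { DO_HARD_RESET, RESEND_CAPS_LOWER_REV, GIVE_UP };

static enum next_step on_caps_timeout(int *hard_resets, int *rev)
{
	if (*hard_resets < PD_N_HARD_RESET_COUNT) {
		(*hard_resets)++;
		return DO_HARD_RESET;
	}
	if (*rev > PD_REV20) {
		(*rev)--;		/* retry with an older, simpler revision */
		*hard_resets = 0;
		return RESEND_CAPS_LOWER_REV;
	}
	return GIVE_UP;
}

int main(void)
{
	int resets = 0, rev = PD_REV30;
	enum next_step n;

	do {
		n = on_caps_timeout(&resets, &rev);
		printf("step=%d resets=%d rev=%d\n", n, resets, rev);
	} while (n != GIVE_UP);
	return 0;
}
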
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e19383..6770afd40765 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
wcove->dev = &pdev->dev;
wcove->regmap = pmic->regmap;
- irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
- platform_get_irq(pdev, 0));
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
+ }
+
+ irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
if (irq < 0)
return irq;
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index df7d09409efe..8ca333f21292 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -27,6 +27,10 @@
#define GUEST_MAPPINGS_TRIES 5
+#define VBG_KERNEL_REQUEST \
+ (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
+ VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
/**
* Reserves memory in which the VMM can relocate any guest mappings
* that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
int i, rc;
/* Query the required space. */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
+ VBG_KERNEL_REQUEST);
if (!req)
return;
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
* Tell the host that we're going to free the memory we reserved for
* it, the free it up. (Leak the memory if anything goes wrong here.)
*/
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
+ VBG_KERNEL_REQUEST);
if (!req)
return;
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
struct vmmdev_guest_info2 *req2 = NULL;
int rc, ret = -ENOMEM;
- req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
- req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+ req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
+ VBG_KERNEL_REQUEST);
+ req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
+ VBG_KERNEL_REQUEST);
if (!req1 || !req2)
goto out_free;
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
req2->additions_minor = VBG_VERSION_MINOR;
req2->additions_build = VBG_VERSION_BUILD;
req2->additions_revision = VBG_SVN_REV;
- /* (no features defined yet) */
- req2->additions_features = 0;
+ req2->additions_features =
+ VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
strlcpy(req2->name, VBG_VERSION_STRING,
sizeof(req2->name));
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
struct vmmdev_guest_status *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
struct vmmdev_heartbeat *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
gdev->guest_heartbeat_req = vbg_req_alloc(
sizeof(*gdev->guest_heartbeat_req),
- VMMDEVREQ_GUEST_HEARTBEAT);
+ VMMDEVREQ_GUEST_HEARTBEAT,
+ VBG_KERNEL_REQUEST);
if (!gdev->guest_heartbeat_req)
return -ENOMEM;
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
struct vmmdev_mask *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
u32 changed, previous;
int rc, ret = 0;
- /* Allocate a request buffer before taking the spinlock */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+ /*
+ * Allocate a request buffer before taking the spinlock; when
+ * the session is being terminated the requestor is the kernel,
+ * as we're cleaning up.
+ */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+ session_termination ? VBG_KERNEL_REQUEST :
+ session->requestor);
if (!req) {
if (!session_termination)
return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
struct vmmdev_mask *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
u32 changed, previous;
int rc, ret = 0;
- /* Allocate a request buffer before taking the spinlock */
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+ /*
+ * Allocate a request buffer before taking the spinlock; when
+ * the session is being terminated the requestor is the kernel,
+ * as we're cleaning up.
+ */
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+ session_termination ? VBG_KERNEL_REQUEST :
+ session->requestor);
if (!req) {
if (!session_termination)
return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
struct vmmdev_host_version *req;
int rc, ret;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
gdev->mem_balloon.get_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
- VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+ VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+ VBG_KERNEL_REQUEST);
gdev->mem_balloon.change_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
- VMMDEVREQ_CHANGE_MEMBALLOON);
+ VMMDEVREQ_CHANGE_MEMBALLOON,
+ VBG_KERNEL_REQUEST);
gdev->cancel_req =
vbg_req_alloc(sizeof(*(gdev->cancel_req)),
- VMMDEVREQ_HGCM_CANCEL2);
+ VMMDEVREQ_HGCM_CANCEL2,
+ VBG_KERNEL_REQUEST);
gdev->ack_events_req =
vbg_req_alloc(sizeof(*gdev->ack_events_req),
- VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+ VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+ VBG_KERNEL_REQUEST);
gdev->mouse_status_req =
vbg_req_alloc(sizeof(*gdev->mouse_status_req),
- VMMDEVREQ_GET_MOUSE_STATUS);
+ VMMDEVREQ_GET_MOUSE_STATUS,
+ VBG_KERNEL_REQUEST);
if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
!gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
* vboxguest_linux.c calls this when userspace opens the char-device.
* Return: A pointer to the new session or an ERR_PTR on error.
* @gdev: The Guest extension device.
- * @user: Set if this is a session for the vboxuser device.
+ * @requestor: VMMDEV_REQUESTOR_* flags
*/
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
struct vbg_session *session;
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
return ERR_PTR(-ENOMEM);
session->gdev = gdev;
- session->user_session = user;
+ session->requestor = requestor;
return session;
}
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
if (!session->hgcm_client_ids[i])
continue;
- vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+ /* requestor is kernel here, as we're cleaning up. */
+ vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+ session->hgcm_client_ids[i], &rc);
}
kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
return -EPERM;
}
- if (trusted_apps_only && session->user_session) {
+ if (trusted_apps_only &&
+ (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
req->request_type);
return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EMFILE;
- ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
- &conn->hdr.rc);
+ ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+ &client_id, &conn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EINVAL;
- ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+ ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+ &disconn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
}
if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
- ret = vbg_hgcm_call32(gdev, client_id,
+ ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS32(call),
call->parm_count, &call->hdr.rc);
else
- ret = vbg_hgcm_call(gdev, client_id,
+ ret = vbg_hgcm_call(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS(call),
call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
}
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+ struct vbg_session *session,
struct vbg_ioctl_write_coredump *dump)
{
struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
return -EINVAL;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+ session->requestor);
if (!req)
return -ENOMEM;
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
case VBG_IOCTL_CHECK_BALLOON:
return vbg_ioctl_check_balloon(gdev, data);
case VBG_IOCTL_WRITE_CORE_DUMP:
- return vbg_ioctl_write_core_dump(gdev, data);
+ return vbg_ioctl_write_core_dump(gdev, session, data);
}
/* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
struct vmmdev_mouse_status *req;
int rc;
- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+ VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
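
A minimal sketch of the allocate/perform/free pattern the kernel-internal callers above now follow; the third vbg_req_alloc() argument is the new requestor information, and VBG_KERNEL_REQUEST marks requests that originate in the driver itself rather than in a user session (the example function and its error mapping are illustrative, not taken from the patch):

/* Illustrative sketch: kernel-internal requests pass VBG_KERNEL_REQUEST
 * as the new third argument to vbg_req_alloc(). */
static int vbg_example_kernel_request(struct vbg_dev *gdev)
{
	struct vmmdev_mouse_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_MOUSE_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);	/* rc is a VBox status code */
	vbg_req_free(req, sizeof(*req));

	return rc < 0 ? -EIO : 0;		/* error mapping is illustrative */
}
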
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 7ad9ec45bfa9..4188c12b839f 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -154,15 +154,15 @@ struct vbg_session {
* host. Protected by vbg_gdev.session_mutex.
*/
u32 guest_caps;
- /** Does this session belong to a root process or a user one? */
- bool user_session;
+ /** VMMDEV_REQUESTOR_* flags */
+ u32 requestor;
/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
bool cancel_waiters;
};
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
void vbg_core_close_session(struct vbg_session *session);
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
void vbg_linux_mouse_event(struct vbg_dev *gdev);
/* Private (non-exported) functions from vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+ u32 requestor);
void vbg_req_free(void *req, size_t len);
int vbg_req_perform(struct vbg_dev *gdev, void *req);
int vbg_hgcm_call32(
- struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
- struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
- int *vbox_status);
+ struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+ u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+ u32 parm_count, int *vbox_status);
#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e2a9619192d..6e8c0f1c1056 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -5,6 +5,7 @@
* Copyright (C) 2006-2016 Oracle Corporation
*/
+#include <linux/cred.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
static struct vbg_dev *vbg_gdev;
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+ u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+ VMMDEV_REQUESTOR_CON_DONT_KNOW |
+ VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+ if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+ requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+ else
+ requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+ if (in_egroup_p(inode->i_gid))
+ requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+ return requestor;
+}
+
static int vbg_misc_device_open(struct inode *inode, struct file *filp)
{
struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
/* misc_open sets filp->private_data to our misc device */
gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
- session = vbg_core_open_session(gdev, false);
+ session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
if (IS_ERR(session))
return PTR_ERR(session);
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
gdev = container_of(filp->private_data, struct vbg_dev,
misc_device_user);
- session = vbg_core_open_session(gdev, false);
+ session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+ VMMDEV_REQUESTOR_USER_DEVICE);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
if (is_vmmdev_req)
- buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+ buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+ session->requestor);
else
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
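
As a reading aid, a condensed sketch of how the VMMDEV_REQUESTOR_USER_DEVICE bit set at open time is consumed later by vbg_req_allowed(); both halves appear in hunks of this patch, only the juxtaposition is illustrative:

/* /dev/vboxuser: session is marked as coming from the user device ... */
session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
				VMMDEV_REQUESTOR_USER_DEVICE);

/* ... so vbg_req_allowed() can refuse "trusted apps only" VMM requests. */
if (trusted_apps_only &&
    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE))
	return -EPERM;
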
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index bf4474214b4d..75fd140b02ff 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
VBG_LOG(vbg_debug, pr_debug);
#endif
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+ u32 requestor)
{
struct vmmdev_request_header *req;
int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
req->request_type = req_type;
req->rc = VERR_GENERAL_FAILURE;
req->reserved1 = 0;
- req->reserved2 = 0;
+ req->requestor = requestor;
return req;
}
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
return done;
}
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status)
{
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
int rc;
hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
- VMMDEVREQ_HGCM_CONNECT);
+ VMMDEVREQ_HGCM_CONNECT, requestor);
if (!hgcm_connect)
return -ENOMEM;
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
}
EXPORT_SYMBOL(vbg_hgcm_connect);
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+ u32 client_id, int *vbox_status)
{
struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
int rc;
hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
- VMMDEVREQ_HGCM_DISCONNECT);
+ VMMDEVREQ_HGCM_DISCONNECT,
+ requestor);
if (!hgcm_disconnect)
return -ENOMEM;
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
return 0;
}
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
- u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
- u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+ u32 function, u32 timeout_ms,
+ struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+ int *vbox_status)
{
struct vmmdev_hgcm_call *call;
void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
goto free_bounce_bufs;
}
- call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+ call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
if (!call) {
ret = -ENOMEM;
goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
- struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
- struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
- int *vbox_status)
+ struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+ u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+ u32 parm_count, int *vbox_status)
{
struct vmmdev_hgcm_function_parameter *parm64 = NULL;
u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
goto out_free;
}
- ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+ ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
parm64, parm_count, vbox_status);
if (ret < 0)
goto out_free;
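
Since vbg_hgcm_connect/call/disconnect are exported, in-kernel HGCM clients also have to pass the new requestor argument; a minimal, hypothetical caller (only the vbg_hgcm_* signatures are taken from this patch, the surrounding function is made up) might look like:

/* Hypothetical in-kernel HGCM client, using VBG_KERNEL_REQUEST as the
 * requestor information for all host calls. */
static int example_hgcm_roundtrip(struct vbg_dev *gdev,
				  struct vmmdev_hgcm_service_location *loc)
{
	u32 client_id;
	int vbox_status, ret;

	ret = vbg_hgcm_connect(gdev, VBG_KERNEL_REQUEST, loc,
			       &client_id, &vbox_status);
	if (ret < 0)
		return ret;

	/* ... vbg_hgcm_call(gdev, VBG_KERNEL_REQUEST, client_id, function,
	 *                   timeout_ms, parms, parm_count, &vbox_status); ... */

	return vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST, client_id,
				   &vbox_status);
}
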
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
index 77f0c8f8a231..84834dad38d5 100644
--- a/drivers/virt/vboxguest/vboxguest_version.h
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -9,11 +9,10 @@
#ifndef __VBOX_VERSION_H__
#define __VBOX_VERSION_H__
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
#define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
#endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 5e2ae978935d..6337b8d75d96 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
s32 rc;
/** Reserved field no.1. MBZ. */
u32 reserved1;
- /** Reserved field no.2. MBZ. */
- u32 reserved2;
+ /** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+ u32 requestor;
};
VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
};
VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO BIT(0)
+
/** struct vmmdev_guestinfo2 - Guest information report, version 2. */
struct vmmdev_guest_info2 {
/** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
u32 additions_build;
/** SVN revision. */
u32 additions_revision;
- /** Feature mask, currently unused. */
+ /** Feature mask. */
u32 additions_features;
/**
* The intentional meaning of this field was: