author | Olof Johansson <olof@lixom.net> | 2019-05-15 22:51:48 -0700
---|---|---
committer | Olof Johansson <olof@lixom.net> | 2019-05-15 22:51:48 -0700
commit | 7a0c4c17089a8aff52f516f0f52002be52950aae (patch) |
tree | 474dc451d62a1ba788ed52c4cecf93f9e50cc39a /drivers |
parent | a842b514db723d7bec5e8f9e6f57e5cfbb6b1f37 (diff) |
parent | affe2a51001450c32886f70248b57eac0f9b68ef (diff) |
download | linux-7a0c4c17089a8aff52f516f0f52002be52950aae.tar.gz linux-7a0c4c17089a8aff52f516f0f52002be52950aae.tar.bz2 linux-7a0c4c17089a8aff52f516f0f52002be52950aae.zip |
Merge branch 'fixes' into arm/soc
Merge in a few pending fixes from pre-5.1 that didn't get sent in:
MAINTAINERS: update arch/arm/mach-davinci
ARM: dts: ls1021: Fix SGMII PCS link remaining down after PHY disconnect
ARM: dts: imx6q-logicpd: Reduce inrush current on USBH1
ARM: dts: imx6q-logicpd: Reduce inrush current on start
ARM: dts: imx: Fix the AR803X phy-mode
ARM: dts: sun8i: a33: Reintroduce default pinctrl muxing
arm64: dts: allwinner: a64: Rename hpvcc-supply to cpvdd-supply
ARM: sunxi: fix a leaked reference by adding missing of_node_put
ARM: sunxi: fix a leaked reference by adding missing of_node_put
Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'drivers')
315 files changed, 2445 insertions, 1493 deletions
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 62d3aa74277b..5e9d7348c16f 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) ACPI_FUNCTION_TRACE(ev_enable_gpe); - /* Enable the requested GPE */ + /* Clear the GPE status */ + status = acpi_hw_clear_gpe(gpe_event_info); + if (ACPI_FAILURE(status)) + return_ACPI_STATUS(status); + /* Enable the requested GPE */ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index 8638f43cfc3d..79d86da1c892 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c @@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node) } } + if (obj_desc->common.type == ACPI_TYPE_REGION) { + acpi_ut_remove_address_range(obj_desc->region.space_id, node); + } + /* Clear the Node entry in all cases */ node->object = NULL; diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 5a389a4f4f65..f1ed0befe303 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -567,6 +567,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, goto out; } + dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, + cmd_name, out_obj->buffer.length); + print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, + out_obj->buffer.pointer, + min_t(u32, 128, out_obj->buffer.length), true); + if (call_pkg) { call_pkg->nd_fw_size = out_obj->buffer.length; memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, @@ -585,12 +591,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, return 0; } - dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name, - cmd_name, out_obj->buffer.length); - print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, - out_obj->buffer.pointer, - min_t(u32, 128, out_obj->buffer.length), true); - for (i = 0, offset = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, (u32 *) out_obj->buffer.pointer, diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index f70de71f79d6..cddd0fcf622c 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -122,9 +122,8 @@ static int intel_security_change_key(struct nvdimm *nvdimm, if (!test_bit(cmd, &nfit_mem->dsm_mask)) return -ENOTTY; - if (old_data) - memcpy(nd_cmd.cmd.old_pass, old_data->data, - sizeof(nd_cmd.cmd.old_pass)); + memcpy(nd_cmd.cmd.old_pass, old_data->data, + sizeof(nd_cmd.cmd.old_pass)); memcpy(nd_cmd.cmd.new_pass, new_data->data, sizeof(nd_cmd.cmd.new_pass)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -336,9 +335,8 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, /* flush all cache before we erase DIMM */ nvdimm_invalidate_cache(); - if (nkey) - memcpy(nd_cmd.cmd.passphrase, nkey->data, - sizeof(nd_cmd.cmd.passphrase)); + memcpy(nd_cmd.cmd.passphrase, nkey->data, + sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); if (rc < 0) return rc; diff --git a/drivers/base/memory.c b/drivers/base/memory.c index cb8347500ce2..e49028a60429 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -506,7 +506,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr, ret = lock_device_hotplug_sysfs(); if (ret) - goto out; + return ret; nid 
= memory_add_physaddr_to_nid(phys_addr); ret = __add_memory(nid, phys_addr, diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 417a9f15c116..d7ac09c092f2 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1748,6 +1748,11 @@ static int __init null_init(void) return -EINVAL; } + if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { + pr_err("null_blk: invalid home_node value\n"); + g_home_node = NUMA_NO_NODE; + } + if (g_queue_mode == NULL_Q_RQ) { pr_err("null_blk: legacy IO path no longer available\n"); return -EINVAL; diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 377a694dc228..6d415b20fb70 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -314,6 +314,7 @@ static void pcd_init_units(void) disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops, 1, BLK_MQ_F_SHOULD_MERGE); if (IS_ERR(disk->queue)) { + put_disk(disk); disk->queue = NULL; continue; } @@ -750,6 +751,8 @@ static int pcd_detect(void) printk("%s: No CD-ROM drive found\n", name); for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + if (!cd->disk) + continue; blk_cleanup_queue(cd->disk->queue); cd->disk->queue = NULL; blk_mq_free_tag_set(&cd->tag_set); @@ -1010,8 +1013,14 @@ static int __init pcd_init(void) pcd_probe_capabilities(); if (register_blkdev(major, name)) { - for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + if (!cd->disk) + continue; + + blk_cleanup_queue(cd->disk->queue); + blk_mq_free_tag_set(&cd->tag_set); put_disk(cd->disk); + } return -EBUSY; } @@ -1032,6 +1041,9 @@ static void __exit pcd_exit(void) int unit; for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + if (!cd->disk) + continue; + if (cd->present) { del_gendisk(cd->disk); pi_release(cd->pi); diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 103b617cdc31..35e6e271b219 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -762,6 +762,8 @@ static int pf_detect(void) printk("%s: No ATAPI disk detected\n", name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { + if (!pf->disk) + continue; blk_cleanup_queue(pf->disk->queue); pf->disk->queue = NULL; blk_mq_free_tag_set(&pf->tag_set); @@ -1029,8 +1031,13 @@ static int __init pf_init(void) pf_busy = 0; if (register_blkdev(major, name)) { - for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { + if (!pf->disk) + continue; + blk_cleanup_queue(pf->disk->queue); + blk_mq_free_tag_set(&pf->tag_set); put_disk(pf->disk); + } return -EBUSY; } @@ -1051,6 +1058,9 @@ static void __exit pf_exit(void) int unit; unregister_blkdev(major, name); for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { + if (!pf->disk) + continue; + if (pf->present) del_gendisk(pf->disk); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4bc083b7c9b5..2a7ca4a1e6f7 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -513,6 +513,8 @@ static int init_vq(struct virtio_blk *vblk) if (err) num_vqs = 1; + num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs); + vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); if (!vblk->vqs) return -ENOMEM; diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 87ccef4bd69e..32a21b8d1d85 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -1090,6 +1090,8 @@ static int ace_setup(struct 
ace_device *ace) return 0; err_read: + /* prevent double queue cleanup */ + ace->gd->queue = NULL; put_disk(ace->gd); err_alloc_disk: blk_cleanup_queue(ace->queue); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index ded198328f21..7db48ae65cd2 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2942,6 +2942,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) return 0; } + irq_set_status_flags(irq, IRQ_NOAUTOEN); ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler, 0, "OOB Wake-on-BT", data); if (ret) { @@ -2956,7 +2957,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev) } data->oob_wake_irq = irq; - disable_irq(irq); bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq); return 0; } diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 72866a004f07..466ebd84ad17 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -348,7 +348,7 @@ config XILINX_HWICAP config R3964 tristate "Siemens R3964 line discipline" - depends on TTY + depends on TTY && BROKEN ---help--- This driver allows synchronous communication with devices using the Siemens R3964 packet protocol. Unless you are dealing with special diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c index ff0b199be472..f2411468f33f 100644 --- a/drivers/char/ipmi/ipmi_dmi.c +++ b/drivers/char/ipmi/ipmi_dmi.c @@ -66,7 +66,6 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr, return; } - memset(&p, 0, sizeof(p)); p.addr = base_addr; p.space = space; p.regspacing = offset; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index e8ba67834746..00bf4b17edbf 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -214,6 +214,9 @@ struct ipmi_user { /* Does this interface receive IPMI events? */ bool gets_events; + + /* Free must run in process context for RCU cleanup. */ + struct work_struct remove_work; }; static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) @@ -1157,6 +1160,15 @@ static int intf_err_seq(struct ipmi_smi *intf, return rv; } +static void free_user_work(struct work_struct *work) +{ + struct ipmi_user *user = container_of(work, struct ipmi_user, + remove_work); + + cleanup_srcu_struct(&user->release_barrier); + kfree(user); +} + int ipmi_create_user(unsigned int if_num, const struct ipmi_user_hndl *handler, void *handler_data, @@ -1200,6 +1212,8 @@ int ipmi_create_user(unsigned int if_num, goto out_kfree; found: + INIT_WORK(&new_user->remove_work, free_user_work); + rv = init_srcu_struct(&new_user->release_barrier); if (rv) goto out_kfree; @@ -1260,8 +1274,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info); static void free_user(struct kref *ref) { struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); - cleanup_srcu_struct(&user->release_barrier); - kfree(user); + + /* SRCU cleanup must happen in task context. */ + schedule_work(&user->remove_work); } static void _ipmi_destroy_user(struct ipmi_user *user) diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c index 01946cad3d13..682221eebd66 100644 --- a/drivers/char/ipmi/ipmi_si_hardcode.c +++ b/drivers/char/ipmi/ipmi_si_hardcode.c @@ -118,6 +118,8 @@ void __init ipmi_hardcode_init(void) char *str; char *si_type[SI_MAX_PARMS]; + memset(si_type, 0, sizeof(si_type)); + /* Parse out the si_type string into its components. 
*/ str = si_type_str; if (*str != '\0') { diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index d8b77133a83a..f824563fc28d 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c @@ -37,8 +37,8 @@ * * Returns size of the event. If it is an invalid event, returns 0. */ -static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event, - struct tcg_pcr_event *event_header) +static size_t calc_tpm2_event_size(struct tcg_pcr_event2_head *event, + struct tcg_pcr_event *event_header) { struct tcg_efi_specid_event_head *efispecid; struct tcg_event_field *event_field; diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 8856cce5a23b..817ae09a369e 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -233,12 +233,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait) __poll_t mask = 0; poll_wait(file, &priv->async_wait, wait); + mutex_lock(&priv->buffer_mutex); - if (!priv->response_read || priv->response_length) + /* + * The response_length indicates if there is still response + * (or part of it) to be consumed. Partial reads decrease it + * by the number of bytes read, and write resets it the zero. + */ + if (priv->response_length) mask = EPOLLIN | EPOLLRDNORM; else mask = EPOLLOUT | EPOLLWRNORM; + mutex_unlock(&priv->buffer_mutex); return mask; } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 83ece5639f86..ae1030c9b086 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -402,15 +402,13 @@ int tpm_pm_suspend(struct device *dev) if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) return 0; - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - mutex_lock(&chip->tpm_mutex); - if (!tpm_chip_start(chip)) { + if (!tpm_chip_start(chip)) { + if (chip->flags & TPM_CHIP_FLAG_TPM2) tpm2_shutdown(chip, TPM2_SU_STATE); - tpm_chip_stop(chip); - } - mutex_unlock(&chip->tpm_mutex); - } else { - rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); + else + rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); + + tpm_chip_stop(chip); } return rc; diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index 89d6f3736dbf..f8edbb65eda3 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c @@ -20,8 +20,7 @@ #define PROG_ID_MAX 7 #define PROG_STATUS_MASK(id) (1 << ((id) + 8)) -#define PROG_PRES_MASK 0x7 -#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK) +#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & layout->pres_mask) #define PROG_MAX_RM9200_CSS 3 struct clk_programmable { @@ -37,20 +36,29 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_programmable *prog = to_clk_programmable(hw); + const struct clk_programmable_layout *layout = prog->layout; unsigned int pckr; + unsigned long rate; regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr); - return parent_rate >> PROG_PRES(prog->layout, pckr); + if (layout->is_pres_direct) + rate = parent_rate / (PROG_PRES(layout, pckr) + 1); + else + rate = parent_rate >> PROG_PRES(layout, pckr); + + return rate; } static int clk_programmable_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { + struct clk_programmable *prog = to_clk_programmable(hw); + const struct clk_programmable_layout *layout = prog->layout; struct clk_hw *parent; long best_rate = -EINVAL; unsigned long parent_rate; - unsigned 
long tmp_rate; + unsigned long tmp_rate = 0; int shift; int i; @@ -60,10 +68,18 @@ static int clk_programmable_determine_rate(struct clk_hw *hw, continue; parent_rate = clk_hw_get_rate(parent); - for (shift = 0; shift < PROG_PRES_MASK; shift++) { - tmp_rate = parent_rate >> shift; - if (tmp_rate <= req->rate) - break; + if (layout->is_pres_direct) { + for (shift = 0; shift <= layout->pres_mask; shift++) { + tmp_rate = parent_rate / (shift + 1); + if (tmp_rate <= req->rate) + break; + } + } else { + for (shift = 0; shift < layout->pres_mask; shift++) { + tmp_rate = parent_rate >> shift; + if (tmp_rate <= req->rate) + break; + } } if (tmp_rate > req->rate) @@ -137,16 +153,23 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate, if (!div) return -EINVAL; - shift = fls(div) - 1; + if (layout->is_pres_direct) { + shift = div - 1; - if (div != (1 << shift)) - return -EINVAL; + if (shift > layout->pres_mask) + return -EINVAL; + } else { + shift = fls(div) - 1; - if (shift >= PROG_PRES_MASK) - return -EINVAL; + if (div != (1 << shift)) + return -EINVAL; + + if (shift >= layout->pres_mask) + return -EINVAL; + } regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), - PROG_PRES_MASK << layout->pres_shift, + layout->pres_mask << layout->pres_shift, shift << layout->pres_shift); return 0; @@ -202,19 +225,25 @@ at91_clk_register_programmable(struct regmap *regmap, } const struct clk_programmable_layout at91rm9200_programmable_layout = { + .pres_mask = 0x7, .pres_shift = 2, .css_mask = 0x3, .have_slck_mck = 0, + .is_pres_direct = 0, }; const struct clk_programmable_layout at91sam9g45_programmable_layout = { + .pres_mask = 0x7, .pres_shift = 2, .css_mask = 0x3, .have_slck_mck = 1, + .is_pres_direct = 0, }; const struct clk_programmable_layout at91sam9x5_programmable_layout = { + .pres_mask = 0x7, .pres_shift = 4, .css_mask = 0x7, .have_slck_mck = 0, + .is_pres_direct = 0, }; diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 672a79bda88c..a0e5ce9c9b9e 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -71,9 +71,11 @@ struct clk_pll_characteristics { }; struct clk_programmable_layout { + u8 pres_mask; u8 pres_shift; u8 css_mask; u8 have_slck_mck; + u8 is_pres_direct; }; extern const struct clk_programmable_layout at91rm9200_programmable_layout; diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index 1f70cb164b06..81943fac4537 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -125,6 +125,14 @@ static const struct { .pll = true }, }; +static const struct clk_programmable_layout sama5d2_programmable_layout = { + .pres_mask = 0xff, + .pres_shift = 4, + .css_mask = 0x7, + .have_slck_mck = 0, + .is_pres_direct = 1, +}; + static void __init sama5d2_pmc_setup(struct device_node *np) { struct clk_range range = CLK_RANGE(0, 0); @@ -249,7 +257,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 6, i, - &at91sam9x5_programmable_layout); + &sama5d2_programmable_layout); if (IS_ERR(hw)) goto err_free; } diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index 1acfa3e3cfb4..113d71042199 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -362,7 +362,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, switch (pll_clk->type) { case PLL_1416X: - if (!pll->rate_table) + if (!pll_clk->rate_table) init.ops = &clk_pll1416x_min_ops; else init.ops = &clk_pll1416x_ops; 
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c index 9628d4e7690b..85daf826619a 100644 --- a/drivers/clk/mediatek/clk-gate.c +++ b/drivers/clk/mediatek/clk-gate.c @@ -169,11 +169,10 @@ struct clk *mtk_clk_register_gate( return ERR_PTR(-ENOMEM); init.name = name; - init.flags = CLK_SET_RATE_PARENT; + init.flags = flags | CLK_SET_RATE_PARENT; init.parent_names = parent_name ? &parent_name : NULL; init.num_parents = parent_name ? 1 : 0; init.ops = ops; - init.flags = flags; cg->regmap = regmap; cg->set_ofs = set_ofs; diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 41e16dd7272a..7a14ac9b2fec 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -120,7 +120,7 @@ static bool meson_clk_pll_is_better(unsigned long rate, return true; } else { /* Round down */ - if (now < rate && best < now) + if (now <= rate && best < now) return true; } diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 0e1ce8c03259..f7b11e1eeebe 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -960,14 +960,14 @@ static struct clk_regmap g12a_sd_emmc_c_clk0 = { /* VPU Clock */ static const char * const g12a_vpu_parent_names[] = { - "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", + "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7", "mpll1", "vid_pll", "hifi_pll", "gp0_pll", }; static struct clk_regmap g12a_vpu_0_sel = { .data = &(struct clk_regmap_mux_data){ .offset = HHI_VPU_CLK_CNTL, - .mask = 0x3, + .mask = 0x7, .shift = 9, }, .hw.init = &(struct clk_init_data){ @@ -1011,7 +1011,7 @@ static struct clk_regmap g12a_vpu_0 = { static struct clk_regmap g12a_vpu_1_sel = { .data = &(struct clk_regmap_mux_data){ .offset = HHI_VPU_CLK_CNTL, - .mask = 0x3, + .mask = 0x7, .shift = 25, }, .hw.init = &(struct clk_init_data){ diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 04df2e208ed6..29ffb4fde714 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -2216,6 +2216,7 @@ static struct clk_regmap gxbb_vdec_1_div = { .offset = HHI_VDEC_CLK_CNTL, .shift = 0, .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ .name = "vdec_1_div", @@ -2261,6 +2262,7 @@ static struct clk_regmap gxbb_vdec_hevc_div = { .offset = HHI_VDEC2_CLK_CNTL, .shift = 16, .width = 7, + .flags = CLK_DIVIDER_ROUND_CLOSEST, }, .hw.init = &(struct clk_init_data){ .name = "vdec_hevc_div", diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c index 08bcc01c0923..daff235bc763 100644 --- a/drivers/clk/meson/vid-pll-div.c +++ b/drivers/clk/meson/vid-pll-div.c @@ -82,8 +82,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw, div = _get_table_val(meson_parm_read(clk->map, &pll_div->val), meson_parm_read(clk->map, &pll_div->sel)); if (!div || !div->divider) { - pr_info("%s: Invalid config value for vid_pll_div\n", __func__); - return parent_rate; + pr_debug("%s: Invalid config value for vid_pll_div\n", __func__); + return 0; } return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider); diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index d977193842df..19174835693b 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = { }; static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, - void __iomem *base, + const struct pmc_clk_data *pmc_data, const char **parent_names, int num_parents) { @@ 
-184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, init.num_parents = num_parents; pclk->hw.init = &init; - pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; + pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; spin_lock_init(&pclk->lock); + /* + * On some systems, the pmc_plt_clocks already enabled by the + * firmware are being marked as critical to avoid them being + * gated by the clock framework. + */ + if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw)) + init.flags |= CLK_IS_CRITICAL; + ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); if (ret) { pclk = ERR_PTR(ret); @@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev) return PTR_ERR(parent_names); for (i = 0; i < PMC_CLK_NUM; i++) { - data->clks[i] = plt_clk_register(pdev, i, pmc_data->base, + data->clks[i] = plt_clk_register(pdev, i, pmc_data, parent_names, data->nparents); if (IS_ERR(data->clks[i])) { err = PTR_ERR(data->clks[i]); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 6d2b0d821c27..48321488f0fd 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -152,6 +152,7 @@ config VT8500_TIMER config NPCM7XX_TIMER bool "NPCM7xx timer driver" if COMPILE_TEST depends on HAS_IOMEM + select TIMER_OF select CLKSRC_MMIO help Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture, diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index aa4ec53281ce..ea373cfbcecb 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -9,7 +9,7 @@ * published by the Free Software Foundation. */ -#define pr_fmt(fmt) "arm_arch_timer: " fmt +#define pr_fmt(fmt) "arch_timer: " fmt #include <linux/init.h> #include <linux/kernel.h> @@ -33,9 +33,6 @@ #include <clocksource/arm_arch_timer.h> -#undef pr_fmt -#define pr_fmt(fmt) "arch_timer: " fmt - #define CNTTIDR 0x08 #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c index eed6feff8b5f..30c6f4ce672b 100644 --- a/drivers/clocksource/timer-oxnas-rps.c +++ b/drivers/clocksource/timer-oxnas-rps.c @@ -296,4 +296,4 @@ err_alloc: TIMER_OF_DECLARE(ox810se_rps, "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init); TIMER_OF_DECLARE(ox820_rps, - "oxsemi,ox820se-rps-timer", oxnas_rps_timer_init); + "oxsemi,ox820-rps-timer", oxnas_rps_timer_init); diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 3352da6ed61f..ee8ec5a8cb16 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload, return 0; } -/* Optimized set_load which removes costly spin wait in timer_start */ -static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, - int autoreload, unsigned int load) -{ - u32 l; - - if (unlikely(!timer)) - return -EINVAL; - - omap_dm_timer_enable(timer); - - l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); - if (autoreload) { - l |= OMAP_TIMER_CTRL_AR; - omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load); - } else { - l &= ~OMAP_TIMER_CTRL_AR; - } - l |= OMAP_TIMER_CTRL_ST; - - __omap_dm_timer_load_start(timer, l, load, timer->posted); - - /* Save the context */ - timer->context.tclr = l; - timer->context.tldr = load; - timer->context.tcrr = load; - return 0; -} static int omap_dm_timer_set_match(struct omap_dm_timer 
*timer, int enable, unsigned int match) { diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b599c7318aab..2986119dd31f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2596,6 +2596,9 @@ static int __init intel_pstate_init(void) const struct x86_cpu_id *id; int rc; + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return -ENODEV; + if (no_load) return -ENODEV; @@ -2611,7 +2614,7 @@ static int __init intel_pstate_init(void) } else { id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) { - pr_info("CPU ID not supported\n"); + pr_info("CPU model not supported\n"); return -ENODEV; } diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index b1eadc6652b5..7205d9f4029e 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -865,19 +865,18 @@ static int ahash_update_ctx(struct ahash_request *req) if (ret) goto unmap_ctx; - if (mapped_nents) { + if (mapped_nents) sg_to_sec4_sg_last(req->src, mapped_nents, edesc->sec4_sg + sec4_sg_src_index, 0); - if (*next_buflen) - scatterwalk_map_and_copy(next_buf, req->src, - to_hash - *buflen, - *next_buflen, 0); - } else { + else sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); - } + if (*next_buflen) + scatterwalk_map_and_copy(next_buf, req->src, + to_hash - *buflen, + *next_buflen, 0); desc = edesc->hw_desc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 8e17149655f0..540e8cd16ee6 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -116,7 +116,7 @@ config EXTCON_PALMAS config EXTCON_PTN5150 tristate "NXP PTN5150 CC LOGIC USB EXTCON support" - depends on I2C && GPIOLIB || COMPILE_TEST + depends on I2C && (GPIOLIB || COMPILE_TEST) select REGMAP_I2C help Say Y here to enable support for USB peripheral and USB host diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4f8fb4ecde34..79fb302fb954 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) /* No need to recover an evicted BO */ if (shadow->tbo.mem.mem_type != TTM_PL_TT || + shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) continue; @@ -3173,11 +3174,16 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) break; if (fence) { - r = dma_fence_wait_timeout(fence, false, tmo); + tmo = dma_fence_wait_timeout(fence, false, tmo); dma_fence_put(fence); fence = next; - if (r <= 0) + if (tmo == 0) { + r = -ETIMEDOUT; break; + } else if (tmo < 0) { + r = tmo; + break; + } } else { fence = next; } @@ -3188,8 +3194,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) tmo = dma_fence_wait_timeout(fence, false, tmo); dma_fence_put(fence); - if (r <= 0 || tmo <= 0) { - DRM_ERROR("recover vram bo from shadow failed\n"); + if (r < 0 || tmo <= 0) { + DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); return -EIO; } @@ -3625,6 +3631,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, struct pci_dev *pdev = adev->pdev; enum pci_bus_speed cur_speed; enum pcie_link_width cur_width; + u32 ret = 1; *speed = PCI_SPEED_UNKNOWN; *width = PCIE_LNK_WIDTH_UNKNOWN; @@ -3632,6 +3639,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, while (pdev) 
{ cur_speed = pcie_get_speed_cap(pdev); cur_width = pcie_get_width_cap(pdev); + ret = pcie_bandwidth_available(adev->pdev, NULL, + NULL, &cur_width); + if (!ret) + cur_width = PCIE_LNK_WIDTH_RESRV; if (cur_speed != PCI_SPEED_UNKNOWN) { if (*speed == PCI_SPEED_UNKNOWN) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 0b8ef2d27d6b..fe393a46f881 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -35,6 +35,7 @@ #include "amdgpu_trace.h" #define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) +#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000) /* * IB @@ -344,6 +345,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) * cost waiting for it coming back under RUNTIME only */ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; + } else if (adev->gmc.xgmi.hive_id) { + tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT; } for (i = 0; i < adev->num_rings; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d0309e8c9d12..a11db2b1a63f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); - adev->gfx.rlc.funcs->reset(adev); - gfx_v9_0_init_pg(adev); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index d0d966d6080a..1696644ec022 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6); } + WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp); tmp = mmVM_L2_CNTL4_DEFAULT; tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 8be9677c0c07..cf9a49f49d3a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -320,6 +320,7 @@ static const struct kfd_deviceid supported_devices[] = { { 0x9876, &carrizo_device_info }, /* Carrizo */ { 0x9877, &carrizo_device_info }, /* Carrizo */ { 0x15DD, &raven_device_info }, /* Raven */ + { 0x15D8, &raven_device_info }, /* Raven */ #endif { 0x67A0, &hawaii_device_info }, /* Hawaii */ { 0x67A1, &hawaii_device_info }, /* Hawaii */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 81127f7d6ed1..3082b55b1e77 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4533,6 +4533,7 @@ static void handle_cursor_update(struct drm_plane *plane, amdgpu_crtc->cursor_width = plane->state->crtc_w; amdgpu_crtc->cursor_height = plane->state->crtc_h; + memset(&attributes, 0, sizeof(attributes)); attributes.address.high_part = upper_32_bits(address); attributes.address.low_part = lower_32_bits(address); attributes.width = plane->state->crtc_w; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c68fbd55db3c..a6cda201c964 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc, return UPDATE_TYPE_FULL; } 
+ if (u->surface->force_full_update) { + update_flags->bits.full_update = 1; + return UPDATE_TYPE_FULL; + } + type = get_plane_info_update_type(u); elevate_update_type(&overall_type, type); @@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc, } dc_resource_state_copy_construct(state, context); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) + new_pipe->plane_state->force_full_update = true; + } } @@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc, dc->current_state = context; dc_release_state(old); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state && pipe_ctx->stream == stream) + pipe_ctx->plane_state->force_full_update = false; + } } /*let's use current_state to update watermark etc*/ if (update_type >= UPDATE_TYPE_FULL) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 4eba3c4800b6..ea18e9c2d8ce 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2660,12 +2660,18 @@ void core_link_enable_stream( void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; core_dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) + dal_ddc_service_write_scdc_data( + stream->link->ddc, 0, + stream->timing.flags.LTE_340MCSC_SCRAMBLE); + core_dc->hwss.disable_stream(pipe_ctx, option); disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1a7fd6aa77eb..0515095574e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -503,6 +503,9 @@ struct dc_plane_state { struct dc_plane_status status; struct dc_context *ctx; + /* HACK: Workaround for forcing full reprogramming under some conditions */ + bool force_full_update; + /* private to dc_surface.c */ enum dc_irq_source irq_source; struct kref refcount; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 4febf4ef7240..4fe3664fb495 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -190,6 +190,12 @@ static void submit_channel_request( 1, 0); } + + REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); + + REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, + 10, aux110->timeout_period/10); + /* set the delay and the number of bytes to write */ /* The length include @@ -242,9 +248,6 @@ static void submit_channel_request( } } - REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); - REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, - 10, aux110->timeout_period/10); REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index d27f22c05e4b..e28ed6a00ff4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -71,11 +71,11 @@ enum { /* This is the timeout as defined in DP 1.2a, * at most 
within ~240usec. That means, * increasing this timeout will not affect normal operation, * and we'll timeout after - * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec. + * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec. * This timeout is especially important for - * resume from S3 and CTS. + * converters, resume from S3, and CTS. */ - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4 + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6 }; struct dce_aux { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 683829466a44..0ba68d41b9c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1150,28 +1150,9 @@ void hubp1_cursor_set_position( REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en); - //account for cases where we see negative offset relative to overlay plane - if (src_x_offset < 0 && src_y_offset < 0) { - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, 0, - CURSOR_Y_POSITION, 0); - x_hotspot -= src_x_offset; - y_hotspot -= src_y_offset; - } else if (src_x_offset < 0) { - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, 0, - CURSOR_Y_POSITION, pos->y); - x_hotspot -= src_x_offset; - } else if (src_y_offset < 0) { - REG_SET_2(CURSOR_POSITION, 0, + REG_SET_2(CURSOR_POSITION, 0, CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, 0); - y_hotspot -= src_y_offset; - } else { - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, pos->y); - } + CURSOR_Y_POSITION, pos->y); REG_SET_2(CURSOR_HOT_SPOT, 0, CURSOR_HOT_SPOT_X, x_hotspot, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 9aa7bec1b5fe..23b5b94a4939 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) * MP0CLK DS */ data->registry_data.disallowed_features = 0xE0041C00; + /* ECC feature should be disabled on old SMUs */ + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); + hwmgr->smu_version = smum_get_argument(hwmgr); + if (hwmgr->smu_version < 0x282100) + data->registry_data.disallowed_features |= FEATURE_ECC_MASK; + data->registry_data.od_state_in_dc_support = 0; data->registry_data.thermal_support = 1; data->registry_data.skip_baco_hardware = 0; @@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; + data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT; for (i = 0; i < GNLD_FEATURES_MAX; i++) { data->smu_features[i].smu_feature_bitmap = @@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) "FCLK_DS", "MP1CLK_DS", "MP0CLK_DS", - "XGMI"}; + "XGMI", + "ECC"}; static const char *output_title[] = { "FEATURES", "BITMASK", @@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) struct vega20_single_dpm_table *dpm_table; bool vblank_too_short = false; bool disable_mclk_switching; + bool disable_fclk_switching; uint32_t i, latency; disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && @@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) if 
(hwmgr->display_config->nb_pstate_switch_disable) dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + if ((disable_mclk_switching && + (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) || + hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value) + disable_fclk_switching = true; + else + disable_fclk_switching = false; + /* fclk */ dpm_table = &(data->dpm_table.fclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; - if (hwmgr->display_config->nb_pstate_switch_disable) + if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching) dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; /* vclk */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index a5bc758ae097..ac2a3118a0ae 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -80,6 +80,7 @@ enum { GNLD_DS_MP1CLK, GNLD_DS_MP0CLK, GNLD_XGMI, + GNLD_ECC, GNLD_FEATURES_MAX }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 63d5cf691549..195c4ae67058 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -99,7 +99,7 @@ #define FEATURE_DS_MP1CLK_BIT 30 #define FEATURE_DS_MP0CLK_BIT 31 #define FEATURE_XGMI_BIT 32 -#define FEATURE_SPARE_33_BIT 33 +#define FEATURE_ECC_BIT 33 #define FEATURE_SPARE_34_BIT 34 #define FEATURE_SPARE_35_BIT 35 #define FEATURE_SPARE_36_BIT 36 @@ -165,7 +165,8 @@ #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) #define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) #define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) -#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT ) +#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT ) +#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT ) #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index a63e5f0dae56..db761329a1e3 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1037,6 +1037,31 @@ void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, } EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write); +/* Filter out invalid setups to avoid configuring SCDC and scrambling */ +static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi) +{ + struct drm_display_info *display = &hdmi->connector.display_info; + + /* Completely disable SCDC support for older controllers */ + if (hdmi->version < 0x200a) + return false; + + /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */ + if (!display->hdmi.scdc.supported || + !display->hdmi.scdc.scrambling.supported) + return false; + + /* + * Disable if display only support low TMDS rates and scrambling + * for low rates is not supported either + */ + if (!display->hdmi.scdc.scrambling.low_rates && + display->max_tmds_clock <= 340000) + return false; + + return true; +} + /* * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates: * 
- The Source shall suspend transmission of the TMDS clock and data @@ -1055,7 +1080,7 @@ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi) unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock; /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */ - if (hdmi->connector.display_info.hdmi.scdc.supported) { + if (dw_hdmi_support_scdc(hdmi)) { if (mtmdsclock > HDMI14_MAX_TMDSCLK) drm_scdc_set_high_tmds_clock_ratio(hdmi->ddc, 1); else @@ -1579,8 +1604,9 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, /* Set up HDMI_FC_INVIDCONF */ inv_val = (hdmi->hdmi_data.hdcp_enable || - vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || - hdmi_info->scdc.scrambling.low_rates ? + (dw_hdmi_support_scdc(hdmi) && + (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || + hdmi_info->scdc.scrambling.low_rates)) ? HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE : HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE); @@ -1646,7 +1672,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, } /* Scrambling Control */ - if (hdmi_info->scdc.supported) { + if (dw_hdmi_support_scdc(hdmi)) { if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || hdmi_info->scdc.scrambling.low_rates) { /* diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 40ac19848034..fbb76332cc9f 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1034,7 +1034,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs->atomic_disable(crtc, old_crtc_state); else if (funcs->disable) funcs->disable(crtc); - else + else if (funcs->dpms) funcs->dpms(crtc, DRM_MODE_DPMS_OFF); if (!(dev->irq_enabled && dev->num_crtcs)) @@ -1277,10 +1277,9 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, if (new_crtc_state->enable) { DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", crtc->base.id, crtc->name); - if (funcs->atomic_enable) funcs->atomic_enable(crtc, old_crtc_state); - else + else if (funcs->commit) funcs->commit(crtc); } } diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 035479e273be..e3f9caa7839f 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) /** * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU * @vgpu: a vGPU - * @conncted: link state + * @connected: link state * * This function is used to trigger hotplug interrupt for vGPU * diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 3e7e2b80c857..69a9a1b2ea4a 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -209,7 +209,7 @@ static int vgpu_get_plane_info(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_vgpu_primary_plane_format p; struct intel_vgpu_cursor_plane_format c; - int ret; + int ret, tile_height = 1; if (plane_id == DRM_PLANE_TYPE_PRIMARY) { ret = intel_vgpu_decode_primary_plane(vgpu, &p); @@ -228,19 +228,19 @@ static int vgpu_get_plane_info(struct drm_device *dev, break; case PLANE_CTL_TILED_X: info->drm_format_mod = I915_FORMAT_MOD_X_TILED; + tile_height = 8; break; case PLANE_CTL_TILED_Y: info->drm_format_mod = I915_FORMAT_MOD_Y_TILED; + tile_height = 32; break; case PLANE_CTL_TILED_YF: info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED; + tile_height = 32; break; default: gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); } - - info->size = (((p.stride * p.height * p.bpp) / 8) + - (PAGE_SIZE - 1)) >> PAGE_SHIFT; } 
else if (plane_id == DRM_PLANE_TYPE_CURSOR) { ret = intel_vgpu_decode_cursor_plane(vgpu, &c); if (ret) @@ -262,14 +262,13 @@ static int vgpu_get_plane_info(struct drm_device *dev, info->x_hot = UINT_MAX; info->y_hot = UINT_MAX; } - - info->size = (((info->stride * c.height * c.bpp) / 8) - + (PAGE_SIZE - 1)) >> PAGE_SHIFT; } else { gvt_vgpu_err("invalid plane id:%d\n", plane_id); return -EINVAL; } + info->size = (info->stride * roundup(info->height, tile_height) + + PAGE_SIZE - 1) >> PAGE_SHIFT; if (info->size == 0) { gvt_vgpu_err("fb size is zero\n"); return -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d7052ab7908c..9814773882ec 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -750,14 +750,20 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) static void ppgtt_free_all_spt(struct intel_vgpu *vgpu) { - struct intel_vgpu_ppgtt_spt *spt; + struct intel_vgpu_ppgtt_spt *spt, *spn; struct radix_tree_iter iter; - void **slot; + LIST_HEAD(all_spt); + void __rcu **slot; + rcu_read_lock(); radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { spt = radix_tree_deref_slot(slot); - ppgtt_free_spt(spt); + list_move(&spt->post_shadow_list, &all_spt); } + rcu_read_unlock(); + + list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list) + ppgtt_free_spt(spt); } static int ppgtt_handle_guest_write_page_table_bytes( @@ -1946,7 +1952,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) */ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) { - atomic_dec(&mm->pincount); + atomic_dec_if_positive(&mm->pincount); } /** diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index d5fcc447d22f..a68addf95c23 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -905,7 +905,7 @@ static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off) static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, void *buf, unsigned long count, bool is_write) { - void *aperture_va; + void __iomem *aperture_va; if (!intel_vgpu_in_aperture(vgpu, off) || !intel_vgpu_in_aperture(vgpu, off + count)) { @@ -920,9 +920,9 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, return -EIO; if (is_write) - memcpy(aperture_va + offset_in_page(off), buf, count); + memcpy_toio(aperture_va + offset_in_page(off), buf, count); else - memcpy(buf, aperture_va + offset_in_page(off), count); + memcpy_fromio(buf, aperture_va + offset_in_page(off), count); io_mapping_unmap(aperture_va); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 159192c097cc..05b953793316 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1486,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, intel_runtime_pm_put_unchecked(dev_priv); } - if (ret && (vgpu_is_vm_unhealthy(ret))) { - enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); + if (ret) { + if (vgpu_is_vm_unhealthy(ret)) + enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); intel_vgpu_destroy_workload(workload); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0bd890c04fe4..f6f6e5b78e97 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); if (ret) 
{ - ret = -EINTR; + if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) { + try_again = true; + continue; + } break; } crtc = connector->state->crtc; diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 73a7bee24a66..641e0778fa9c 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -323,6 +323,21 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) } } +static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, + struct intel_dsi *intel_dsi) +{ + enum port port; + + for_each_dsi_port(port, intel_dsi->ports) { + WARN_ON(intel_dsi->io_wakeref[port]); + intel_dsi->io_wakeref[port] = + intel_display_power_get(dev_priv, + port == PORT_A ? + POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO); + } +} + static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -336,13 +351,7 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); } - for_each_dsi_port(port, intel_dsi->ports) { - intel_dsi->io_wakeref[port] = - intel_display_power_get(dev_priv, - port == PORT_A ? - POWER_DOMAIN_PORT_DDI_A_IO : - POWER_DOMAIN_PORT_DDI_B_IO); - } + get_dsi_io_power_domains(dev_priv, intel_dsi); } static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) @@ -589,6 +598,12 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); } I915_WRITE(DPCLKA_CFGCR0_ICL, val); + + for_each_dsi_port(port, intel_dsi->ports) { + val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); + } + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + POSTING_READ(DPCLKA_CFGCR0_ICL); mutex_unlock(&dev_priv->dpll_lock); @@ -1117,7 +1132,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) DRM_ERROR("DDI port:%c buffer not idle\n", port_name(port)); } - gen11_dsi_ungate_clocks(encoder); + gen11_dsi_gate_clocks(encoder); } static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) @@ -1218,20 +1233,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, return 0; } -static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state) +static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - u64 domains = 0; - enum port port; - - for_each_dsi_port(port, intel_dsi->ports) - if (port == PORT_A) - domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO); - else - domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO); - - return domains; + get_dsi_io_power_domains(to_i915(encoder->base.dev), + enc_to_intel_dsi(&encoder->base)); } static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 14d580cdefd3..ab4e60dfd6a3 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2075,12 +2075,11 @@ intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port) intel_aux_power_domain(dig_port); } -static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state) +static void intel_ddi_get_power_domains(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port; - u64 domains; /* * TODO: Add support for MST 
encoders. Atm, the following should never @@ -2088,10 +2087,10 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, * hook. */ if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) - return 0; + return; dig_port = enc_to_dig_port(&encoder->base); - domains = BIT_ULL(dig_port->ddi_io_power_domain); + intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); /* * AUX power is only needed for (e)DP mode, and for HDMI mode on TC @@ -2099,15 +2098,15 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, */ if (intel_crtc_has_dp_encoder(crtc_state) || intel_port_is_tc(dev_priv, encoder->port)) - domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port)); + intel_display_power_get(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); /* * VDSC power is needed when DSC is enabled */ if (crtc_state->dsc_params.compression_enable) - domains |= BIT_ULL(intel_dsc_power_domain(crtc_state)); - - return domains; + intel_display_power_get(dev_priv, + intel_dsc_power_domain(crtc_state)); } void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) @@ -2825,10 +2824,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) return; } /* - * DSI ports should have their DDI clock ungated when disabled - * and gated when enabled. + * For DSI we keep the ddi clocks gated + * except during enable/disable sequence. */ - ddi_clk_needed = !encoder->base.crtc; + ddi_clk_needed = false; } val = I915_READ(DPCLKA_CFGCR0_ICL); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ccb616351bba..421aac80a838 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -15986,8 +15986,6 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) struct intel_encoder *encoder; for_each_intel_encoder(&dev_priv->drm, encoder) { - u64 get_domains; - enum intel_display_power_domain domain; struct intel_crtc_state *crtc_state; if (!encoder->get_power_domains) @@ -16001,9 +15999,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) continue; crtc_state = to_intel_crtc_state(encoder->base.crtc->state); - get_domains = encoder->get_power_domains(encoder, crtc_state); - for_each_power_domain(domain, get_domains) - intel_display_power_get(dev_priv, domain); + encoder->get_power_domains(encoder, crtc_state); } } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cf709835fb9a..8891f29a8c7f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1859,42 +1859,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, return -EINVAL; } -/* Optimize link config in order: max bpp, min lanes, min clock */ -static int -intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - const struct link_config_limits *limits) -{ - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - int bpp, clock, lane_count; - int mode_rate, link_clock, link_avail; - - for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { - mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, - bpp); - - for (lane_count = limits->min_lane_count; - lane_count <= limits->max_lane_count; - lane_count <<= 1) { - for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { - link_clock = intel_dp->common_rates[clock]; - link_avail = intel_dp_max_data_rate(link_clock, - lane_count); - - if (mode_rate <= link_avail) { - 
pipe_config->lane_count = lane_count; - pipe_config->pipe_bpp = bpp; - pipe_config->port_clock = link_clock; - - return 0; - } - } - } - } - - return -EINVAL; -} - static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) { int i, num_bpc; @@ -2031,15 +1995,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, limits.min_bpp = 6 * 3; limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); - if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) { + if (intel_dp_is_edp(intel_dp)) { /* * Use the maximum clock and number of lanes the eDP panel - * advertizes being capable of. The eDP 1.3 and earlier panels - * are generally designed to support only a single clock and - * lane configuration, and typically these values correspond to - * the native resolution of the panel. With eDP 1.4 rate select - * and DSC, this is decreasingly the case, and we need to be - * able to select less than maximum link config. + * advertizes being capable of. The panels are generally + * designed to support only a single clock and lane + * configuration, and typically these values correspond to the + * native resolution of the panel. */ limits.min_lane_count = limits.max_lane_count; limits.min_clock = limits.max_clock; @@ -2053,22 +2015,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, intel_dp->common_rates[limits.max_clock], limits.max_bpp, adjusted_mode->crtc_clock); - if (intel_dp_is_edp(intel_dp)) - /* - * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 - * section A.1: "It is recommended that the minimum number of - * lanes be used, using the minimum link rate allowed for that - * lane configuration." - * - * Note that we use the max clock and lane count for eDP 1.3 and - * earlier, and fast vs. wide is irrelevant. - */ - ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, - &limits); - else - /* Optimize for slow and wide. */ - ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, - &limits); + /* + * Optimize for slow and wide. This is the place to add alternative + * optimization policy. + */ + ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); /* enable compression if the mode doesn't fit available BW */ DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 15db41394b9e..d5660ac1b0d6 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -270,10 +270,12 @@ struct intel_encoder { * be set correctly before calling this function. */ void (*get_config)(struct intel_encoder *, struct intel_crtc_state *pipe_config); - /* Returns a mask of power domains that need to be referenced as part - * of the hardware state readout code. */ - u64 (*get_power_domains)(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state); + /* + * Acquires the power domains needed for an active encoder during + * hardware state readout. 
+ */ + void (*get_power_domains)(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); /* * Called during system suspend after all pending requests for the * encoder are flushed (for example for DP AUX transactions) and diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 6403728fe778..31c93c3ccd00 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -256,6 +256,28 @@ static void band_gap_reset(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->sb_lock); } +static int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 tmp; + + tmp = I915_READ(PIPEMISC(crtc->pipe)); + + switch (tmp & PIPEMISC_DITHER_BPC_MASK) { + case PIPEMISC_DITHER_6_BPC: + return 18; + case PIPEMISC_DITHER_8_BPC: + return 24; + case PIPEMISC_DITHER_10_BPC: + return 30; + case PIPEMISC_DITHER_12_BPC: + return 36; + default: + MISSING_CASE(tmp); + return 0; + } +} + static int intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -1071,6 +1093,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, bpp = mipi_dsi_pixel_format_to_bpp( pixel_format_from_register_bits(fmt)); + pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); + /* Enable Frame time stamo based scanline reporting */ adjusted_mode->private_flags |= I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 22e68a100e7b..5d333138f913 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -662,13 +662,11 @@ static unsigned int mt8173_calculate_factor(int clock) static unsigned int mt2701_calculate_factor(int clock) { if (clock <= 64000) - return 16; - else if (clock <= 128000) - return 8; - else if (clock <= 256000) return 4; - else + else if (clock <= 128000) return 2; + else + return 1; } static const struct mtk_dpi_conf mt8173_conf = { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index cf59ea9bccfd..57ce4708ef1b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -15,6 +15,7 @@ #include <drm/drmP.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_of.h> @@ -341,6 +342,8 @@ static struct drm_driver mtk_drm_driver = { .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .gem_prime_mmap = mtk_drm_gem_mmap_buf, + .gem_prime_vmap = mtk_drm_gem_prime_vmap, + .gem_prime_vunmap = mtk_drm_gem_prime_vunmap, .fops = &mtk_drm_fops, .name = DRIVER_NAME, @@ -376,6 +379,10 @@ static int mtk_drm_bind(struct device *dev) if (ret < 0) goto err_deinit; + ret = drm_fbdev_generic_setup(drm, 32); + if (ret) + DRM_ERROR("Failed to initialize fbdev: %d\n", ret); + return 0; err_deinit: diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 259b7b0de1d2..38483e9ee071 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -241,3 +241,49 @@ err_gem_free: kfree(mtk_gem); return ERR_PTR(ret); } + +void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) +{ + struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + struct sg_table *sgt; + struct sg_page_iter iter; + 
unsigned int npages; + unsigned int i = 0; + + if (mtk_gem->kvaddr) + return mtk_gem->kvaddr; + + sgt = mtk_gem_prime_get_sg_table(obj); + if (IS_ERR(sgt)) + return NULL; + + npages = obj->size >> PAGE_SHIFT; + mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); + if (!mtk_gem->pages) + goto out; + + for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { + mtk_gem->pages[i++] = sg_page_iter_page(&iter); + if (i > npages) + break; + } + mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + +out: + kfree((void *)sgt); + + return mtk_gem->kvaddr; +} + +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); + + if (!mtk_gem->pages) + return; + + vunmap(vaddr); + mtk_gem->kvaddr = 0; + kfree((void *)mtk_gem->pages); +} diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index 534639b43a1c..c047a7ef294f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h @@ -37,6 +37,7 @@ struct mtk_drm_gem_obj { dma_addr_t dma_addr; unsigned long dma_attrs; struct sg_table *sg; + struct page **pages; }; #define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base) @@ -52,5 +53,7 @@ int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); +void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj); +void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); #endif diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 915cc84621ae..e04e6c293d39 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1480,7 +1480,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, if (IS_ERR(regmap)) ret = PTR_ERR(regmap); if (ret) { - ret = PTR_ERR(regmap); dev_err(dev, "Failed to get system configuration registers: %d\n", ret); @@ -1516,6 +1515,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, of_node_put(remote); hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np); + of_node_put(i2c_np); if (!hdmi->ddc_adpt) { dev_err(dev, "Failed to get ddc i2c adapter by node\n"); return -EINVAL; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c index 4ef9c57ffd44..5223498502c4 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c @@ -15,28 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = { .owner = THIS_MODULE, }; -long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - hdmi_phy->pll_rate = rate; - if (rate <= 74250000) - *parent_rate = rate; - else - *parent_rate = rate / 2; - - return rate; -} - -unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - - return hdmi_phy->pll_rate; -} - void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, u32 bits) { @@ -110,13 +88,11 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy) return NULL; } -static void mtk_hdmi_phy_clk_get_ops(struct mtk_hdmi_phy *hdmi_phy, - const struct clk_ops **ops) +static void mtk_hdmi_phy_clk_get_data(struct 
mtk_hdmi_phy *hdmi_phy, + struct clk_init_data *clk_init) { - if (hdmi_phy && hdmi_phy->conf && hdmi_phy->conf->hdmi_phy_clk_ops) - *ops = hdmi_phy->conf->hdmi_phy_clk_ops; - else - dev_err(hdmi_phy->dev, "Failed to get clk ops of phy\n"); + clk_init->flags = hdmi_phy->conf->flags; + clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops; } static int mtk_hdmi_phy_probe(struct platform_device *pdev) @@ -129,7 +105,6 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) struct clk_init_data clk_init = { .num_parents = 1, .parent_names = (const char * const *)&ref_clk_name, - .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, }; struct phy *phy; @@ -167,7 +142,7 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev) hdmi_phy->dev = dev; hdmi_phy->conf = (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev); - mtk_hdmi_phy_clk_get_ops(hdmi_phy, &clk_init.ops); + mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init); hdmi_phy->pll_hw.init = &clk_init; hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw); if (IS_ERR(hdmi_phy->pll)) { diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h index f39b1fc66612..2d8b3182470d 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h @@ -21,6 +21,7 @@ struct mtk_hdmi_phy; struct mtk_hdmi_phy_conf { bool tz_disabled; + unsigned long flags; const struct clk_ops *hdmi_phy_clk_ops; void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy); void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy); @@ -48,10 +49,6 @@ void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset, void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset, u32 val, u32 mask); struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw); -long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate); -unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate); extern struct platform_driver mtk_hdmi_phy_driver; extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf; diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c index fcc42dc6ea7f..d3cc4022e988 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c @@ -79,7 +79,6 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw) mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); usleep_range(80, 100); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); @@ -94,7 +93,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); usleep_range(80, 100); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); @@ -108,6 +106,12 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) usleep_range(80, 100); } +static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long 
*parent_rate) +{ + return rate; +} + static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { @@ -116,13 +120,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, if (rate <= 64000000) pos_div = 3; - else if (rate <= 12800000) - pos_div = 1; + else if (rate <= 128000000) + pos_div = 2; else pos_div = 1; mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK); + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC), RG_HTPLL_IC_MASK); mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR), @@ -154,6 +159,39 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } +static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + unsigned long out_rate, val; + + val = (readl(hdmi_phy->regs + HDMI_CON6) + & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV; + switch (val) { + case 0x00: + out_rate = parent_rate; + break; + case 0x01: + out_rate = parent_rate / 2; + break; + default: + out_rate = parent_rate / 4; + break; + } + + val = (readl(hdmi_phy->regs + HDMI_CON6) + & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV; + out_rate *= (val + 1) * 2; + val = (readl(hdmi_phy->regs + HDMI_CON2) + & RG_HDMITX_TX_POSDIV_MASK); + out_rate >>= (val >> RG_HDMITX_TX_POSDIV); + + if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV) + out_rate /= 5; + + return out_rate; +} + static const struct clk_ops mtk_hdmi_phy_pll_ops = { .prepare = mtk_hdmi_pll_prepare, .unprepare = mtk_hdmi_pll_unprepare, @@ -174,7 +212,6 @@ static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy) mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); usleep_range(80, 100); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); @@ -186,7 +223,6 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN); usleep_range(80, 100); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK); @@ -202,6 +238,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = { .tz_disabled = true, + .flags = CLK_SET_RATE_GATE, .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c index ed5916b27658..47f8a2951682 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c @@ -199,6 +199,20 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) usleep_range(100, 150); } +static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long 
rate, + unsigned long *parent_rate) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + + hdmi_phy->pll_rate = rate; + if (rate <= 74250000) + *parent_rate = rate; + else + *parent_rate = rate / 2; + + return rate; +} + static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { @@ -285,6 +299,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } +static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); + + return hdmi_phy->pll_rate; +} + static const struct clk_ops mtk_hdmi_phy_pll_ops = { .prepare = mtk_hdmi_pll_prepare, .unprepare = mtk_hdmi_pll_unprepare, @@ -309,6 +331,7 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy) } struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = { + .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops, .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds, .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index 340383150fb9..ebf9c96d43ee 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c @@ -175,6 +175,7 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3); hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE); hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE); + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); hdmi4_core_disable(core); return 0; } @@ -182,16 +183,24 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) if (err) return err; + /* + * Initialize CEC clock divider: CEC needs 2MHz clock hence + * set the divider to 24 to get 48/24=2MHz clock + */ + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0); + /* Clear TX FIFO */ if (!hdmi_cec_clear_tx_fifo(adap)) { pr_err("cec-%s: could not clear TX FIFO\n", adap->name); - return -EIO; + err = -EIO; + goto err_disable_clk; } /* Clear RX FIFO */ if (!hdmi_cec_clear_rx_fifo(adap)) { pr_err("cec-%s: could not clear RX FIFO\n", adap->name); - return -EIO; + err = -EIO; + goto err_disable_clk; } /* Clear CEC interrupts */ @@ -236,6 +245,12 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp); } return 0; + +err_disable_clk: + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); + hdmi4_core_disable(core); + + return err; } static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) @@ -333,11 +348,8 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core, return ret; core->wp = wp; - /* - * Initialize CEC clock divider: CEC needs 2MHz clock hence - * set the devider to 24 to get 48/24=2MHz clock - */ - REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0); + /* Disable clock initially, hdmi_cec_adap_enable() manages it */ + REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0, 5, 0); ret = cec_register_adapter(core->adap, &pdev->dev); if (ret < 0) { diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 813ba42f2753..e384b95ad857 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -708,7 +708,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, else acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; /* - * 
The I2S input word length is twice the lenght given in the IEC-60958 + * The I2S input word length is twice the length given in the IEC-60958 * status word. If the word size is greater than * 20 bits, increment by one. */ diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index dc47720c99ba..39d8509d96a0 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -48,8 +48,13 @@ static enum drm_mode_status sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector, const struct drm_display_mode *mode) { - /* This is max for HDMI 2.0b (4K@60Hz) */ - if (mode->clock > 594000) + /* + * Controller support maximum of 594 MHz, which correlates to + * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than + * 340 MHz scrambling has to be enabled. Because scrambling is + * not yet implemented, just limit to 340 MHz for now. + */ + if (mode->clock > 340000) return MODE_CLOCK_HIGH; return MODE_OK; diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index fc36e0c10a37..b1e7c76e9c17 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c @@ -227,7 +227,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master, err_unregister_gates: for (i = 0; i < CLK_NUM; i++) - if (clk_data->hws[i]) + if (!IS_ERR_OR_NULL(clk_data->hws[i])) clk_hw_unregister_gate(clk_data->hws[i]); clk_disable_unprepare(tcon_top->bus); err_assert_reset: @@ -245,7 +245,8 @@ static void sun8i_tcon_top_unbind(struct device *dev, struct device *master, of_clk_del_provider(dev->of_node); for (i = 0; i < CLK_NUM; i++) - clk_hw_unregister_gate(clk_data->hws[i]); + if (clk_data->hws[i]) + clk_hw_unregister_gate(clk_data->hws[i]); clk_disable_unprepare(tcon_top->bus); reset_control_assert(tcon_top->rst); diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 47c55974756d..d23c4bfde790 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) hdmi->dvi = !tegra_output_is_hdmi(output); if (!hdmi->dvi) { - err = tegra_hdmi_setup_audio(hdmi); - if (err < 0) - hdmi->dvi = true; + /* + * Make sure that the audio format has been configured before + * enabling audio, otherwise we may try to divide by zero. 
+ */ + if (hdmi->format.sample_rate > 0) { + err = tegra_hdmi_setup_audio(hdmi); + if (err < 0) + hdmi->dvi = true; + } } if (hdmi->config->has_hda) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3f56647cdb35..0fa5034b9f9e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -876,8 +876,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, reservation_object_add_shared_fence(bo->resv, fence); ret = reservation_object_reserve_shared(bo->resv, 1); - if (unlikely(ret)) + if (unlikely(ret)) { + dma_fence_put(fence); return ret; + } dma_fence_put(bo->moving); bo->moving = fence; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index f841accc2c00..627f8dc91d0e 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, } #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (!(flags & TTM_PAGE_FLAG_DMA32)) { - for (j = 0; j < HPAGE_PMD_NR; ++j) - if (p++ != pages[i + j]) + if (!(flags & TTM_PAGE_FLAG_DMA32) && + (npages - i) >= HPAGE_PMD_NR) { + for (j = 1; j < HPAGE_PMD_NR; ++j) + if (++p != pages[i + j]) break; if (j == HPAGE_PMD_NR) @@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, unsigned max_size, n2free; spin_lock_irqsave(&huge->lock, irq_flags); - while (i < npages) { + while ((npages - i) >= HPAGE_PMD_NR) { struct page *p = pages[i]; unsigned j; if (!p) break; - for (j = 0; j < HPAGE_PMD_NR; ++j) - if (p++ != pages[i + j]) + for (j = 1; j < HPAGE_PMD_NR; ++j) + if (++p != pages[i + j]) break; if (j != HPAGE_PMD_NR) diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 22cd2d13e272..ff47f890e6ad 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -52,6 +52,7 @@ static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, .load = udl_driver_load, .unload = udl_driver_unload, + .release = udl_driver_release, /* gem hooks */ .gem_free_object_unlocked = udl_gem_free_object, diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index e9e9b1ff678e..4ae67d882eae 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb); int udl_driver_load(struct drm_device *dev, unsigned long flags); void udl_driver_unload(struct drm_device *dev); +void udl_driver_release(struct drm_device *dev); int udl_fbdev_init(struct drm_device *dev); void udl_fbdev_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 9086d0d1b880..1f8ef34ade24 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -379,6 +379,12 @@ void udl_driver_unload(struct drm_device *dev) udl_free_urb_list(dev); udl_fbdev_cleanup(dev); - udl_modeset_cleanup(dev); kfree(udl); } + +void udl_driver_release(struct drm_device *dev) +{ + udl_modeset_cleanup(dev); + drm_dev_fini(dev); + kfree(dev); +} diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c index 27101c04a827..0c0eb43abf65 100644 --- a/drivers/gpu/host1x/hw/channel_hw.c +++ b/drivers/gpu/host1x/hw/channel_hw.c @@ -115,8 +115,12 @@ static inline void synchronize_syncpt_base(struct host1x_job *job) static void host1x_channel_set_streamid(struct host1x_channel *channel) { #if HOST1X_HW >= 6 + u32 sid = 0x7f; 
+#ifdef CONFIG_IOMMU_API struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent); - u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f; + if (spec) + sid = spec->ids[0] & 0xffff; +#endif host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID); #endif diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 6ca8d322b487..4ca0cdfa6b33 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -150,6 +150,7 @@ config HID_ASUS tristate "Asus" depends on LEDS_CLASS depends on ASUS_WMI || ASUS_WMI=n + select POWER_SUPPLY ---help--- Support for Asus notebook built-in keyboard and touchpad via i2c, and the Asus Republic of Gamers laptop keyboard special keys. diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9993b692598f..860e21ec6a49 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n) u32 hid_field_extract(const struct hid_device *hid, u8 *report, unsigned offset, unsigned n) { - if (n > 32) { - hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", + if (n > 256) { + hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n", n, current->comm); - n = 32; + n = 256; } return __extract(report, offset, n); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index ac9fda1b5a72..1384e57182af 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p) seq_printf(f, "\n\n"); /* dump parsed data and input mappings */ + if (down_interruptible(&hdev->driver_input_lock)) + return 0; + hid_dump_device(hdev, f); seq_printf(f, "\n"); hid_dump_input_mapping(hdev, f); + up(&hdev->driver_input_lock); + return 0; } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b6d93f4ad037..adce58f24f76 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1083,6 +1083,7 @@ #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 +#define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e #define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index b10b1922c5bd..b607286a0bc8 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -680,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; } + if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */ + switch (usage->hid & 0xf) { + case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break; + default: goto ignore; + } + break; + } + /* * Some lazy vendors declare 255 usages for System Control, * leading to the creation of ABS_X|Y axis and too many others. 
@@ -902,7 +910,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x06a: map_key_clear(KEY_GREEN); break; case 0x06b: map_key_clear(KEY_BLUE); break; case 0x06c: map_key_clear(KEY_YELLOW); break; - case 0x06d: map_key_clear(KEY_ZOOM); break; + case 0x06d: map_key_clear(KEY_ASPECT_RATIO); break; case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break; case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break; @@ -911,6 +919,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break; case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break; + case 0x079: map_key_clear(KEY_KBDILLUMUP); break; + case 0x07a: map_key_clear(KEY_KBDILLUMDOWN); break; + case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE); break; + case 0x082: map_key_clear(KEY_VIDEO_NEXT); break; case 0x083: map_key_clear(KEY_LAST); break; case 0x084: map_key_clear(KEY_ENTER); break; @@ -998,6 +1010,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x1b8: map_key_clear(KEY_VIDEO); break; case 0x1bc: map_key_clear(KEY_MESSENGER); break; case 0x1bd: map_key_clear(KEY_INFO); break; + case 0x1cb: map_key_clear(KEY_ASSISTANT); break; case 0x201: map_key_clear(KEY_NEW); break; case 0x202: map_key_clear(KEY_OPEN); break; case 0x203: map_key_clear(KEY_CLOSE); break; @@ -1021,6 +1034,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x22d: map_key_clear(KEY_ZOOMIN); break; case 0x22e: map_key_clear(KEY_ZOOMOUT); break; case 0x22f: map_key_clear(KEY_ZOOMRESET); break; + case 0x232: map_key_clear(KEY_FULL_SCREEN); break; case 0x233: map_key_clear(KEY_SCROLLUP); break; case 0x234: map_key_clear(KEY_SCROLLDOWN); break; case 0x238: /* AC Pan */ @@ -1044,6 +1058,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break; case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break; + case 0x29f: map_key_clear(KEY_SCALE); break; + default: map_key_clear(KEY_UNKNOWN); } break; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 15ed6177a7a3..199cc256e9d9 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) kfree(data); return -ENOMEM; } + data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); + if (!data->wq) { + kfree(data->effect_ids); + kfree(data); + return -ENOMEM; + } + data->hidpp = hidpp; data->feature_index = feature_index; data->version = version; @@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) /* ignore boost value at response.fap.params[2] */ /* init the hardware command queue */ - data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); atomic_set(&data->workqueue_size, 0); /* initialize with zero autocenter to get wheel in usable state */ @@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) input_report_rel(mydata->input, REL_Y, v); v = hid_snto32(data[6], 8); - hidpp_scroll_counter_handle_scroll( - &hidpp->vertical_wheel_counter, v); + if (v != 0) + hidpp_scroll_counter_handle_scroll( + &hidpp->vertical_wheel_counter, v); input_sync(mydata->input); } diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 953908f2267c..77ffba48cc73 100644 --- a/drivers/hid/hid-quirks.c +++ 
b/drivers/hid/hid-quirks.c @@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) }, { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, @@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = { { } }; -/** +/* * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer * * There are composite devices for which we want to ignore only a certain @@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev) if (hdev->product == 0x0401 && strncmp(hdev->name, "ELAN0800", 8) != 0) return true; + /* Same with product id 0x0400 */ + if (hdev->product == 0x0400 && + strncmp(hdev->name, "QTEC0001", 8) != 0) + return true; break; } @@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev) } if (bl_entry != NULL) - dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n", + dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n", bl_entry->driver_data, bl_entry->vendor, bl_entry->product); @@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev) quirks |= bl_entry->driver_data; if (quirks) - dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n", + dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n", quirks, hdev->vendor, hdev->product); return quirks; } diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 8141cadfca0e..8dae0f9b819e 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam) static int steam_register(struct steam_device *steam) { int ret; + bool client_opened; /* * This function can be called several times in a row with the @@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam) * Unlikely, but getting the serial could fail, and it is not so * important, so make up a serial number and go on. 
*/ + mutex_lock(&steam->mutex); if (steam_get_serial(steam) < 0) strlcpy(steam->serial_no, "XXXXXXXXXX", sizeof(steam->serial_no)); + mutex_unlock(&steam->mutex); hid_info(steam->hdev, "Steam Controller '%s' connected", steam->serial_no); @@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam) } mutex_lock(&steam->mutex); - if (!steam->client_opened) { + client_opened = steam->client_opened; + if (!client_opened) steam_set_lizard_mode(steam, lizard_mode); + mutex_unlock(&steam->mutex); + + if (!client_opened) ret = steam_input_register(steam); - } else { + else ret = 0; - } - mutex_unlock(&steam->mutex); return ret; } @@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev) { struct steam_device *steam = hdev->driver_data; + unsigned long flags; + bool connected; + + spin_lock_irqsave(&steam->lock, flags); + connected = steam->connected; + spin_unlock_irqrestore(&steam->lock, flags); + mutex_lock(&steam->mutex); steam->client_opened = false; + if (connected) + steam_set_lizard_mode(steam, lizard_mode); mutex_unlock(&steam->mutex); - if (steam->connected) { - steam_set_lizard_mode(steam, lizard_mode); + if (connected) steam_input_register(steam); - } } static int steam_client_ll_raw_request(struct hid_device *hdev, diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index 7710d9f957da..0187c9f8fc22 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params, goto cleanup; } rc = usb_string(udev, 201, ver_ptr, ver_len); - if (ver_ptr == NULL) { - rc = -ENOMEM; - goto cleanup; - } if (rc == -EPIPE) { *ver_ptr = '\0'; } else if (rc < 0) { diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 90164fed08d3..4d1f24ee249c 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -184,6 +184,8 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_NO_RUNTIME_PM }, { USB_VENDOR_ID_ELAN, HID_ANY_ID, I2C_HID_QUIRK_BOGUS_IRQ }, + { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E, + I2C_HID_QUIRK_NO_RUNTIME_PM }, { 0, 0 } }; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 6f929bfa9fcd..d0f1dfe2bcbb 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1759,6 +1759,7 @@ config SENSORS_VT8231 config SENSORS_W83773G tristate "Nuvoton W83773G" depends on I2C + select REGMAP_I2C help If you say yes here you get support for the Nuvoton W83773G hardware monitoring chip. 
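The hid-steam.c hunks above move the steam_input_register() calls out from under steam->mutex: the code now takes a snapshot of client_opened (and, in the close path, connected) while the lock is held, drops the lock, and only then registers the input device based on that snapshot. Below is a minimal, self-contained userspace sketch of that snapshot-then-act locking pattern. It is an illustration only, not the driver's code; the names dev, set_default_mode(), register_input() and handle_connect() are hypothetical stand-ins, and pthreads stand in for the kernel mutex API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t lock;
	bool client_opened;	/* protected by lock */
};

/*
 * Hypothetical stand-ins: register_input() models a call that can re-enter
 * code which takes dev->lock (for example an open callback), so it must run
 * without the lock held; set_default_mode() models a plain device command
 * that is fine to issue under the lock.
 */
static void set_default_mode(struct dev *d) { (void)d; puts("default mode set"); }
static void register_input(struct dev *d)   { (void)d; puts("input device registered"); }

static void handle_connect(struct dev *d)
{
	bool client_opened;

	pthread_mutex_lock(&d->lock);
	client_opened = d->client_opened;	/* snapshot while holding the lock */
	if (!client_opened)
		set_default_mode(d);		/* safe under the lock */
	pthread_mutex_unlock(&d->lock);

	if (!client_opened)
		register_input(d);		/* may re-enter: run unlocked */
}

int main(void)
{
	static struct dev d = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.client_opened = false,
	};

	handle_connect(&d);
	return 0;
}

Build with "cc sketch.c -pthread". The point of the split is that only the cheap, non-reentrant work runs under the lock, while anything that may call back into lock-taking code operates on the saved snapshot after the unlock, which is the shape the patch gives steam_register() and steam_client_ll_close().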
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index e4f9f7ce92fa..f9abeeeead9e 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = { }; static const u32 ntc_temp_config[] = { - HWMON_T_INPUT, HWMON_T_TYPE, + HWMON_T_INPUT | HWMON_T_TYPE, 0 }; diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index b91a80abf724..4679acb4918e 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ) s++; } } + + s = (sensors->power.num_sensors * 4) + 1; } else { for (i = 0; i < sensors->power.num_sensors; ++i) { s = i + 1; @@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ) show_power, NULL, 3, i); attr++; } - } - if (sensors->caps.num_sensors >= 1) { s = sensors->power.num_sensors + 1; + } + if (sensors->caps.num_sensors >= 1) { snprintf(attr->name, sizeof(attr->name), "power%d_label", s); attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 0, 0); diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 42fed40198a0..c0c3043b5d61 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev) /* Init DMA config if supported */ ret = i2c_imx_dma_request(i2c_imx, phy_addr); if (ret < 0) - goto clk_notifier_unregister; + goto del_adapter; dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); return 0; /* Return OK */ +del_adapter: + i2c_del_adapter(&i2c_imx->adapter); clk_notifier_unregister: clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); rpm_disable: diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 2dc628d4f1ae..1412abcff010 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1980,7 +1980,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master, { struct i3c_dev_boardinfo *boardinfo; struct device *dev = &master->dev; - struct i3c_device_info info = { }; enum i3c_addr_slot_status addrstatus; u32 init_dyn_addr = 0; @@ -2012,8 +2011,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master, boardinfo->pid = ((u64)reg[1] << 32) | reg[2]; - if ((info.pid & GENMASK_ULL(63, 48)) || - I3C_PID_RND_LOWER_32BITS(info.pid)) + if ((boardinfo->pid & GENMASK_ULL(63, 48)) || + I3C_PID_RND_LOWER_32BITS(boardinfo->pid)) return -EINVAL; boardinfo->init_dyn_addr = init_dyn_addr; diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index 59279224e07f..10c26ffaa8ef 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master) static void dw_i3c_master_disable(struct dw_i3c_master *master) { - writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE, + writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE, master->regs + DEVICE_CTRL); } diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 7096e577b23f..50f3ff386bea 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev) mutex_lock(&data->mutex); ret = kxcjk1013_set_mode(data, OPERATION); + if (ret == 0) + ret = kxcjk1013_set_range(data, data->range); mutex_unlock(&data->mutex); return ret; diff --git a/drivers/iio/adc/ad_sigma_delta.c 
b/drivers/iio/adc/ad_sigma_delta.c index ff5f2da2e1b1..54d9978b2740 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta, if (sigma_delta->info->has_registers) { data[0] = reg << sigma_delta->info->addr_shift; data[0] |= sigma_delta->info->read_mask; + data[0] |= sigma_delta->comm; spi_message_add_tail(&t[0], &m); } spi_message_add_tail(&t[1], &m); diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 75d2f73582a3..596841a3c4db 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev, ret = wait_event_interruptible_timeout(st->wq_data_avail, st->done, msecs_to_jiffies(1000)); - if (ret == 0) - ret = -ETIMEDOUT; - if (ret < 0) { - mutex_unlock(&st->lock); - return ret; - } - - *val = st->last_value; + /* Disable interrupts, regardless if adc conversion was + * successful or not + */ at91_adc_writel(st, AT91_ADC_CHDR, AT91_ADC_CH(chan->channel)); at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel)); - st->last_value = 0; - st->done = false; + if (ret > 0) { + /* a valid conversion took place */ + *val = st->last_value; + st->last_value = 0; + st->done = false; + ret = IIO_VAL_INT; + } else if (ret == 0) { + /* conversion timeout */ + dev_err(&idev->dev, "ADC Channel %d timeout.\n", + chan->channel); + ret = -ETIMEDOUT; + } + mutex_unlock(&st->lock); - return IIO_VAL_INT; + return ret; case IIO_CHAN_INFO_SCALE: *val = st->vref_mv; diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index b13c61539d46..6401ca7a9a20 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -1292,6 +1292,7 @@ static int xadc_probe(struct platform_device *pdev) err_free_irq: free_irq(xadc->irq, indio_dev); + cancel_delayed_work_sync(&xadc->zynq_unmask_work); err_clk_disable_unprepare: clk_disable_unprepare(xadc->clk); err_free_samplerate_trigger: @@ -1321,8 +1322,8 @@ static int xadc_remove(struct platform_device *pdev) iio_triggered_buffer_cleanup(indio_dev); } free_irq(xadc->irq, indio_dev); + cancel_delayed_work_sync(&xadc->zynq_unmask_work); clk_disable_unprepare(xadc->clk); - cancel_delayed_work(&xadc->zynq_unmask_work); kfree(xadc->data); kfree(indio_dev->channels); diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index d5d146e9e372..92c684d2b67e 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -64,6 +64,7 @@ config IAQCORE config PMS7003 tristate "Plantower PMS7003 particulate matter sensor" depends on SERIAL_DEV_BUS + select IIO_TRIGGERED_BUFFER help Say Y here to build support for the Plantower PMS7003 particulate matter sensor. @@ -71,6 +72,19 @@ config PMS7003 To compile this driver as a module, choose M here: the module will be called pms7003. +config SENSIRION_SGP30 + tristate "Sensirion SGPxx gas sensors" + depends on I2C + select CRC8 + help + Say Y here to build I2C interface support for the following + Sensirion SGP gas sensors: + * SGP30 gas sensor + * SGPC3 low power gas sensor + + To compile this driver as module, choose M here: the + module will be called sgp30. 
+ config SPS30 tristate "SPS30 particulate matter sensor" depends on I2C diff --git a/drivers/iio/chemical/bme680.h b/drivers/iio/chemical/bme680.h index 0ae89b87e2d6..4edc5d21cb9f 100644 --- a/drivers/iio/chemical/bme680.h +++ b/drivers/iio/chemical/bme680.h @@ -2,11 +2,9 @@ #ifndef BME680_H_ #define BME680_H_ -#define BME680_REG_CHIP_I2C_ID 0xD0 -#define BME680_REG_CHIP_SPI_ID 0x50 +#define BME680_REG_CHIP_ID 0xD0 #define BME680_CHIP_ID_VAL 0x61 -#define BME680_REG_SOFT_RESET_I2C 0xE0 -#define BME680_REG_SOFT_RESET_SPI 0x60 +#define BME680_REG_SOFT_RESET 0xE0 #define BME680_CMD_SOFTRESET 0xB6 #define BME680_REG_STATUS 0x73 #define BME680_SPI_MEM_PAGE_BIT BIT(4) diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c index 70c1fe4366f4..ccde4c65ff93 100644 --- a/drivers/iio/chemical/bme680_core.c +++ b/drivers/iio/chemical/bme680_core.c @@ -63,9 +63,23 @@ struct bme680_data { s32 t_fine; }; +static const struct regmap_range bme680_volatile_ranges[] = { + regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB), + regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS), + regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG), +}; + +static const struct regmap_access_table bme680_volatile_table = { + .yes_ranges = bme680_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges), +}; + const struct regmap_config bme680_regmap_config = { .reg_bits = 8, .val_bits = 8, + .max_register = 0xef, + .volatile_table = &bme680_volatile_table, + .cache_type = REGCACHE_RBTREE, }; EXPORT_SYMBOL(bme680_regmap_config); @@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data, s64 var1, var2, var3; s16 calc_temp; + /* If the calibration is invalid, attempt to reload it */ + if (!calib->par_t2) + bme680_read_calib(data, calib); + var1 = (adc_temp >> 3) - (calib->par_t1 << 1); var2 = (var1 * calib->par_t2) >> 11; var3 = ((var1 >> 1) * (var1 >> 1)) >> 12; @@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data) return ret; } -static int bme680_read_temp(struct bme680_data *data, - int *val, int *val2) +static int bme680_read_temp(struct bme680_data *data, int *val) { struct device *dev = regmap_get_device(data->regmap); int ret; @@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data, * compensate_press/compensate_humid to get compensated * pressure/humidity readings. 
*/ - if (val && val2) { - *val = comp_temp; - *val2 = 100; - return IIO_VAL_FRACTIONAL; + if (val) { + *val = comp_temp * 10; /* Centidegrees to millidegrees */ + return IIO_VAL_INT; } return ret; @@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data, s32 adc_press; /* Read and compensate temperature to get a reading of t_fine */ - ret = bme680_read_temp(data, NULL, NULL); + ret = bme680_read_temp(data, NULL); if (ret < 0) return ret; @@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data, u32 comp_humidity; /* Read and compensate temperature to get a reading of t_fine */ - ret = bme680_read_temp(data, NULL, NULL); + ret = bme680_read_temp(data, NULL); if (ret < 0) return ret; @@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_PROCESSED: switch (chan->type) { case IIO_TEMP: - return bme680_read_temp(data, val, val2); + return bme680_read_temp(data, val); case IIO_PRESSURE: return bme680_read_press(data, val, val2); case IIO_HUMIDITYRELATIVE: @@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap, { struct iio_dev *indio_dev; struct bme680_data *data; + unsigned int val; int ret; + ret = regmap_write(regmap, BME680_REG_SOFT_RESET, + BME680_CMD_SOFTRESET); + if (ret < 0) { + dev_err(dev, "Failed to reset chip\n"); + return ret; + } + + ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val); + if (ret < 0) { + dev_err(dev, "Error reading chip ID\n"); + return ret; + } + + if (val != BME680_CHIP_ID_VAL) { + dev_err(dev, "Wrong chip ID, got %x expected %x\n", + val, BME680_CHIP_ID_VAL); + return -ENODEV; + } + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; diff --git a/drivers/iio/chemical/bme680_i2c.c b/drivers/iio/chemical/bme680_i2c.c index b2f805b6b36a..de9c9e3d23ea 100644 --- a/drivers/iio/chemical/bme680_i2c.c +++ b/drivers/iio/chemical/bme680_i2c.c @@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client, { struct regmap *regmap; const char *name = NULL; - unsigned int val; - int ret; regmap = devm_regmap_init_i2c(client, &bme680_regmap_config); if (IS_ERR(regmap)) { @@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client, return PTR_ERR(regmap); } - ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C, - BME680_CMD_SOFTRESET); - if (ret < 0) { - dev_err(&client->dev, "Failed to reset chip\n"); - return ret; - } - - ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val); - if (ret < 0) { - dev_err(&client->dev, "Error reading I2C chip ID\n"); - return ret; - } - - if (val != BME680_CHIP_ID_VAL) { - dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n", - val, BME680_CHIP_ID_VAL); - return -ENODEV; - } - if (id) name = id->name; diff --git a/drivers/iio/chemical/bme680_spi.c b/drivers/iio/chemical/bme680_spi.c index d0b7bdd3f066..3b838068a7e4 100644 --- a/drivers/iio/chemical/bme680_spi.c +++ b/drivers/iio/chemical/bme680_spi.c @@ -12,28 +12,93 @@ #include "bme680.h" +struct bme680_spi_bus_context { + struct spi_device *spi; + u8 current_page; +}; + +/* + * In SPI mode there are only 7 address bits, a "page" register determines + * which part of the 8-bit range is active. This function looks at the address + * and writes the page selection bit if needed + */ +static int bme680_regmap_spi_select_page( + struct bme680_spi_bus_context *ctx, u8 reg) +{ + struct spi_device *spi = ctx->spi; + int ret; + u8 buf[2]; + u8 page = (reg & 0x80) ? 
0 : 1; /* Page "1" is low range */ + + if (page == ctx->current_page) + return 0; + + /* + * Data sheet claims we're only allowed to change bit 4, so we must do + * a read-modify-write on each and every page select + */ + buf[0] = BME680_REG_STATUS; + ret = spi_write_then_read(spi, buf, 1, buf + 1, 1); + if (ret < 0) { + dev_err(&spi->dev, "failed to set page %u\n", page); + return ret; + } + + buf[0] = BME680_REG_STATUS; + if (page) + buf[1] |= BME680_SPI_MEM_PAGE_BIT; + else + buf[1] &= ~BME680_SPI_MEM_PAGE_BIT; + + ret = spi_write(spi, buf, 2); + if (ret < 0) { + dev_err(&spi->dev, "failed to set page %u\n", page); + return ret; + } + + ctx->current_page = page; + + return 0; +} + static int bme680_regmap_spi_write(void *context, const void *data, size_t count) { - struct spi_device *spi = context; + struct bme680_spi_bus_context *ctx = context; + struct spi_device *spi = ctx->spi; + int ret; u8 buf[2]; memcpy(buf, data, 2); + + ret = bme680_regmap_spi_select_page(ctx, buf[0]); + if (ret) + return ret; + /* * The SPI register address (= full register address without bit 7) * and the write command (bit7 = RW = '0') */ buf[0] &= ~0x80; - return spi_write_then_read(spi, buf, 2, NULL, 0); + return spi_write(spi, buf, 2); } static int bme680_regmap_spi_read(void *context, const void *reg, size_t reg_size, void *val, size_t val_size) { - struct spi_device *spi = context; + struct bme680_spi_bus_context *ctx = context; + struct spi_device *spi = ctx->spi; + int ret; + u8 addr = *(const u8 *)reg; + + ret = bme680_regmap_spi_select_page(ctx, addr); + if (ret) + return ret; - return spi_write_then_read(spi, reg, reg_size, val, val_size); + addr |= 0x80; /* bit7 = RW = '1' */ + + return spi_write_then_read(spi, &addr, 1, val, val_size); } static struct regmap_bus bme680_regmap_bus = { @@ -46,8 +111,8 @@ static struct regmap_bus bme680_regmap_bus = { static int bme680_spi_probe(struct spi_device *spi) { const struct spi_device_id *id = spi_get_device_id(spi); + struct bme680_spi_bus_context *bus_context; struct regmap *regmap; - unsigned int val; int ret; spi->bits_per_word = 8; @@ -57,45 +122,21 @@ static int bme680_spi_probe(struct spi_device *spi) return ret; } + bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL); + if (!bus_context) + return -ENOMEM; + + bus_context->spi = spi; + bus_context->current_page = 0xff; /* Undefined on warm boot */ + regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus, - &spi->dev, &bme680_regmap_config); + bus_context, &bme680_regmap_config); if (IS_ERR(regmap)) { dev_err(&spi->dev, "Failed to register spi regmap %d\n", (int)PTR_ERR(regmap)); return PTR_ERR(regmap); } - ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI, - BME680_CMD_SOFTRESET); - if (ret < 0) { - dev_err(&spi->dev, "Failed to reset chip\n"); - return ret; - } - - /* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */ - ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val); - if (ret < 0) { - dev_err(&spi->dev, "Error reading SPI chip ID\n"); - return ret; - } - - if (val != BME680_CHIP_ID_VAL) { - dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n", - val, BME680_CHIP_ID_VAL); - return -ENODEV; - } - /* - * select Page 1 of spi_mem_page to enable access to - * to registers from address 0x00 to 0x7F. 
- */ - ret = regmap_write_bits(regmap, BME680_REG_STATUS, - BME680_SPI_MEM_PAGE_BIT, - BME680_SPI_MEM_PAGE_1_VAL); - if (ret < 0) { - dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n"); - return ret; - } - return bme680_core_probe(&spi->dev, regmap, id->name); } diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index 89cb0066a6e0..8d76afb87d87 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev, * Do not use IIO_DEGREE_TO_RAD to avoid precision * loss. Round to the nearest integer. */ - *val = div_s64(val64 * 314159 + 9000000ULL, 1000); - *val2 = 18000 << (CROS_EC_SENSOR_BITS - 1); - ret = IIO_VAL_FRACTIONAL; + *val = 0; + *val2 = div_s64(val64 * 3141592653ULL, + 180 << (CROS_EC_SENSOR_BITS - 1)); + ret = IIO_VAL_INT_PLUS_NANO; break; case MOTIONSENSE_TYPE_MAG: /* diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 6d71fd905e29..c701a45469f6 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev, inoutbuf[0] = 0x60; /* write EEPROM */ inoutbuf[0] |= data->ref_mode << 3; + inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0; inoutbuf[1] = data->dac_value >> 4; inoutbuf[2] = (data->dac_value & 0xf) << 4; diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 63ca31628a93..92c07ab826eb 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: return bmg160_get_filter(data, val); case IIO_CHAN_INFO_SCALE: - *val = 0; switch (chan->type) { case IIO_TEMP: - *val2 = 500000; - return IIO_VAL_INT_PLUS_MICRO; + *val = 500; + return IIO_VAL_INT; case IIO_ANGL_VEL: { int i; @@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev, for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) { if (bmg160_scale_table[i].dps_range == data->dps_range) { + *val = 0; *val2 = bmg160_scale_table[i].scale; return IIO_VAL_INT_PLUS_MICRO; } diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 77fac81a3adc..5ddebede31a6 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -29,7 +29,8 @@ #include "mpu3050.h" -#define MPU3050_CHIP_ID 0x69 +#define MPU3050_CHIP_ID 0x68 +#define MPU3050_CHIP_ID_MASK 0x7E /* * Register map: anything suffixed *_H is a big-endian high byte and always @@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev, goto err_power_down; } - if (val != MPU3050_CHIP_ID) { - dev_err(dev, "unsupported chip id %02x\n", (u8)val); + if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) { + dev_err(dev, "unsupported chip id %02x\n", + (u8)(val & MPU3050_CHIP_ID_MASK)); ret = -ENODEV; goto err_power_down; } diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index cd5bfe39591b..dadd921a4a30 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev, const unsigned long *mask; unsigned long *trialmask; - trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength), - sizeof(*trialmask), - GFP_KERNEL); + trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength), + 
sizeof(*trialmask), GFP_KERNEL); if (trialmask == NULL) return -ENOMEM; if (!indio_dev->masklength) { diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 4700fd5d8c90..9c4d92115504 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -1743,10 +1743,10 @@ EXPORT_SYMBOL(__iio_device_register); **/ void iio_device_unregister(struct iio_dev *indio_dev) { - mutex_lock(&indio_dev->info_exist_lock); - cdev_device_del(&indio_dev->chrdev, &indio_dev->dev); + mutex_lock(&indio_dev->info_exist_lock); + iio_device_unregister_debugfs(indio_dev); iio_disable_all_buffers(indio_dev); diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 70b7d80431a9..f2e7ffe6fc54 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -993,6 +993,8 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) * will only be one mm, so no big deal. */ down_write(&mm->mmap_sem); + if (!mmget_still_valid(mm)) + goto skip_mm; mutex_lock(&ufile->umap_lock); list_for_each_entry_safe (priv, next_priv, &ufile->umaps, list) { @@ -1007,6 +1009,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); } mutex_unlock(&ufile->umap_lock); + skip_mm: up_write(&mm->mmap_sem); mmput(mm); } diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 612f04190ed8..9784c6c0d2ec 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -13232,7 +13232,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd) int total_contexts; int ret; unsigned ngroups; - int qos_rmt_count; + int rmt_count; int user_rmt_reduced; u32 n_usr_ctxts; u32 send_contexts = chip_send_contexts(dd); @@ -13294,10 +13294,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd) n_usr_ctxts = rcv_contexts - total_contexts; } - /* each user context requires an entry in the RMT */ - qos_rmt_count = qos_rmt_entries(dd, NULL, NULL); - if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { - user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count; + /* + * The RMT entries are currently allocated as shown below: + * 1. QOS (0 to 128 entries); + * 2. FECN for PSM (num_user_contexts + num_vnic_contexts); + * 3. VNIC (num_vnic_contexts). + * It should be noted that PSM FECN oversubscribe num_vnic_contexts + * entries of RMT because both VNIC and PSM could allocate any receive + * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts, + * and PSM FECN must reserve an RMT entry for each possible PSM receive + * context. 
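/*
 * Editor's note: illustrative arithmetic only, not part of the hfi1 patch
 * above. It sketches the RMT bookkeeping that change introduces: besides the
 * QOS entries, each VNIC context needs an RMT entry and PSM FECN handling may
 * need one more per VNIC context, so 2 * num_vnic_contexts is reserved up
 * front and the number of user contexts is clamped so the map never overflows.
 * The table size and example counts below are made up for the demo.
 */
#include <stdio.h>

#define NUM_MAP_ENTRIES 256	/* assumed RMT size, for illustration only */

int main(void)
{
	int qos_rmt_count = 128;	/* hypothetical QOS allocation */
	int num_vnic_contexts = 8;
	int n_usr_ctxts = 160;		/* requested user contexts */

	int rmt_count = qos_rmt_count + num_vnic_contexts * 2;

	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
		int user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;

		printf("reducing user contexts from %d to %d\n",
		       n_usr_ctxts, user_rmt_reduced);
		n_usr_ctxts = user_rmt_reduced;
	}

	printf("RMT usage: %d reserved + %d user = %d of %d\n",
	       rmt_count, n_usr_ctxts, rmt_count + n_usr_ctxts,
	       NUM_MAP_ENTRIES);
	return 0;
}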
+ */ + rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2); + if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { + user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; dd_dev_err(dd, "RMT size is reducing the number of user receive contexts from %u to %d\n", n_usr_ctxts, @@ -14285,9 +14295,11 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd, u64 reg; int i, idx, regoff, regidx; u8 offset; + u32 total_cnt; /* there needs to be enough room in the map table */ - if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) { + total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt; + if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n"); return; } @@ -14341,7 +14353,7 @@ static void init_user_fecn_handling(struct hfi1_devdata *dd, /* add rule 1 */ add_rsm_rule(dd, RSM_INS_FECN, &rrd); - rmt->used += dd->num_user_contexts; + rmt->used += total_cnt; } /* Initialize RSM for VNIC */ diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 9b643c2409cf..eba300330a02 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -898,7 +898,9 @@ void notify_error_qp(struct rvt_qp *qp) if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) { - qp->s_flags &= ~RVT_S_ANY_WAIT_IO; + qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; + iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB); + iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID); list_del_init(&priv->s_iowait.list); priv->s_iowait.lock = NULL; rvt_put_qp(qp); diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index e6726c1ab866..5991211d72bd 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -3088,7 +3088,7 @@ send_last: update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; - if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { + if (e->rdma_sge.mr) { rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } @@ -3166,7 +3166,7 @@ send_last: update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; - if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { + if (e->rdma_sge.mr) { rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index fdda33aca77f..43cbce7a19ea 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -5017,24 +5017,14 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps) make_tid_rdma_ack(qp, ohdr, ps)) return 1; - if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { - if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) - goto bail; - /* We are in the error state, flush the work request. */ - if (qp->s_last == READ_ONCE(qp->s_head)) - goto bail; - /* If DMAs are in progress, we can't flush immediately. */ - if (iowait_sdma_pending(&priv->s_iowait)) { - qp->s_flags |= RVT_S_WAIT_DMA; - goto bail; - } - clear_ahg(qp); - wqe = rvt_get_swqe_ptr(qp, qp->s_last); - hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? - IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); - /* will get called again */ - goto done_free_tx; - } + /* + * Bail out if we can't send data. + * Be reminded that this check must been done after the call to + * make_tid_rdma_ack() because the responding QP could be in + * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA. 
+ */ + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) + goto bail; if (priv->s_flags & RVT_S_WAIT_ACK) goto bail; @@ -5144,11 +5134,6 @@ int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps) hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2, middle, ps); return 1; -done_free_tx: - hfi1_put_txreq(ps->s_txreq); - ps->s_txreq = NULL; - return 1; - bail: hfi1_put_txreq(ps->s_txreq); bail_no_tx: diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index f1fec56f3ff4..8e29dbb5b5fb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -792,6 +792,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk; dma_offset = offset = idx_offset * table->obj_size; } else { + u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */ + hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); /* mtt mhop */ i = mhop.l0_idx; @@ -803,8 +805,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev, hem_idx = i; hem = table->hem[hem_idx]; - dma_offset = offset = (obj & (table->num_obj - 1)) * - table->obj_size % mhop.bt_chunk_size; + dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size % + mhop.bt_chunk_size; if (mhop.hop_num == 2) dma_offset = offset = 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index b09f1cde2ff5..08be0e4eabcd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -746,7 +746,6 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table; dma_addr_t dma_handle; __le64 *mtts; - u32 s = start_index * sizeof(u64); u32 bt_page_size; u32 i; @@ -780,7 +779,8 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev, return -EINVAL; mtts = hns_roce_table_find(hr_dev, table, - mtt->first_seg + s / hr_dev->caps.mtt_entry_sz, + mtt->first_seg + + start_index / HNS_ROCE_MTT_ENTRY_PER_SEG, &dma_handle); if (!mtts) return -ENOMEM; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 57c76eafef2f..66cdf625534f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -274,9 +274,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) wait_for_completion(&hr_qp->free); if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) { - if (hr_dev->caps.sccc_entry_sz) - hns_roce_table_put(hr_dev, &qp_table->sccc_table, - hr_qp->qpn); if (hr_dev->caps.trrl_entry_sz) hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index c20bfc41ecf1..0aa10ebda5d9 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; - u64 access_mask = ODP_READ_ALLOWED_BIT; + u64 access_mask; u64 start_idx, page_mask; struct ib_umem_odp *odp; size_t size; @@ -607,6 +607,7 @@ next_mr: page_shift = mr->umem->page_shift; page_mask = ~(BIT(page_shift) - 1); start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift; + access_mask = ODP_READ_ALLOWED_BIT; if (prefetch && !downgrade && !mr->umem->writable) { /* prefetch with 
write-access must diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 6d8b3e0de57a..ec41400fec0c 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -1131,6 +1131,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev) pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); pvrdma_page_dir_cleanup(dev, &dev->async_pdir); pvrdma_free_slots(dev); + dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr, + dev->dsrbase); iounmap(dev->regs); kfree(dev->sgid_tbl); diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c index effb63205d3d..4c67cf30a5d9 100644 --- a/drivers/input/keyboard/snvs_pwrkey.c +++ b/drivers/input/keyboard/snvs_pwrkey.c @@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) return error; } + pdata->input = input; + platform_set_drvdata(pdev, pdata); + error = devm_request_irq(&pdev->dev, pdata->irq, imx_snvs_pwrkey_interrupt, 0, pdev->name, pdev); @@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) return error; } - pdata->input = input; - platform_set_drvdata(pdev, pdata); - device_init_wakeup(&pdev->dev, pdata->wakeup); return 0; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 628ef617bb2f..f9525d6f0bfe 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0600", 0 }, { "ELAN0601", 0 }, { "ELAN0602", 0 }, + { "ELAN0603", 0 }, + { "ELAN0604", 0 }, { "ELAN0605", 0 }, + { "ELAN0606", 0 }, + { "ELAN0607", 0 }, { "ELAN0608", 0 }, { "ELAN0609", 0 }, { "ELAN060B", 0 }, { "ELAN060C", 0 }, + { "ELAN060F", 0 }, + { "ELAN0610", 0 }, { "ELAN0611", 0 }, { "ELAN0612", 0 }, + { "ELAN0615", 0 }, + { "ELAN0616", 0 }, { "ELAN0617", 0 }, { "ELAN0618", 0 }, + { "ELAN0619", 0 }, + { "ELAN061A", 0 }, + { "ELAN061B", 0 }, { "ELAN061C", 0 }, { "ELAN061D", 0 }, { "ELAN061E", 0 }, + { "ELAN061F", 0 }, { "ELAN0620", 0 }, { "ELAN0621", 0 }, { "ELAN0622", 0 }, + { "ELAN0623", 0 }, + { "ELAN0624", 0 }, + { "ELAN0625", 0 }, + { "ELAN0626", 0 }, + { "ELAN0627", 0 }, + { "ELAN0628", 0 }, + { "ELAN0629", 0 }, + { "ELAN062A", 0 }, + { "ELAN062B", 0 }, + { "ELAN062C", 0 }, + { "ELAN062D", 0 }, + { "ELAN0631", 0 }, + { "ELAN0632", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 1b1378619fc9..ff40ba758cf3 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -359,7 +359,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) static void iommu_set_exclusion_range(struct amd_iommu *iommu) { u64 start = iommu->exclusion_start & PAGE_MASK; - u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; + u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK; u64 entry; if (!iommu->exclusion_start) diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c index 86b72fbd3b45..353111a10413 100644 --- a/drivers/irqchip/irq-ls1x.c +++ b/drivers/irqchip/irq-ls1x.c @@ -130,6 +130,7 @@ static int __init ls1x_intc_of_init(struct device_node *node, NULL); if (!priv->domain) { pr_err("ls1x-irq: cannot add IRQ domain\n"); + err = -ENOMEM; goto out_iounmap; } diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 4ab8b1b6608f..a14e35d40538 100644 --- a/drivers/isdn/mISDN/socket.c +++ 
b/drivers/isdn/mISDN/socket.c @@ -710,10 +710,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) struct sock *sk = sock->sk; int err = 0; - if (!maddr || maddr->family != AF_ISDN) + if (addr_len < sizeof(struct sockaddr_mISDN)) return -EINVAL; - if (addr_len < sizeof(struct sockaddr_mISDN)) + if (!maddr || maddr->family != AF_ISDN) return -EINVAL; lock_sock(sk); diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 3789185144da..0b7d5fb4548d 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -231,14 +231,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd) struct pblk_sec_meta *meta; struct bio *new_bio = rqd->bio; struct bio *bio = pr_ctx->orig_bio; - struct bio_vec src_bv, dst_bv; void *meta_list = rqd->meta_list; - int bio_init_idx = pr_ctx->bio_init_idx; unsigned long *read_bitmap = pr_ctx->bitmap; + struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT; + struct bvec_iter new_iter = BVEC_ITER_ALL_INIT; int nr_secs = pr_ctx->orig_nr_secs; int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs); void *src_p, *dst_p; - int hole, i; + int bit, i; if (unlikely(nr_holes == 1)) { struct ppa_addr ppa; @@ -257,33 +257,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd) /* Fill the holes in the original bio */ i = 0; - hole = find_first_zero_bit(read_bitmap, nr_secs); - do { - struct pblk_line *line; + for (bit = 0; bit < nr_secs; bit++) { + if (!test_bit(bit, read_bitmap)) { + struct bio_vec dst_bv, src_bv; + struct pblk_line *line; - line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]); - kref_put(&line->ref, pblk_line_put); + line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]); + kref_put(&line->ref, pblk_line_put); - meta = pblk_get_meta(pblk, meta_list, hole); - meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]); + meta = pblk_get_meta(pblk, meta_list, bit); + meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]); - src_bv = new_bio->bi_io_vec[i++]; - dst_bv = bio->bi_io_vec[bio_init_idx + hole]; + dst_bv = bio_iter_iovec(bio, orig_iter); + src_bv = bio_iter_iovec(new_bio, new_iter); - src_p = kmap_atomic(src_bv.bv_page); - dst_p = kmap_atomic(dst_bv.bv_page); + src_p = kmap_atomic(src_bv.bv_page); + dst_p = kmap_atomic(dst_bv.bv_page); - memcpy(dst_p + dst_bv.bv_offset, - src_p + src_bv.bv_offset, - PBLK_EXPOSED_PAGE_SIZE); + memcpy(dst_p + dst_bv.bv_offset, + src_p + src_bv.bv_offset, + PBLK_EXPOSED_PAGE_SIZE); - kunmap_atomic(src_p); - kunmap_atomic(dst_p); + kunmap_atomic(src_p); + kunmap_atomic(dst_p); - mempool_free(src_bv.bv_page, &pblk->page_bio_pool); + flush_dcache_page(dst_bv.bv_page); + mempool_free(src_bv.bv_page, &pblk->page_bio_pool); - hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1); - } while (hole < nr_secs); + bio_advance_iter(new_bio, &new_iter, + PBLK_EXPOSED_PAGE_SIZE); + i++; + } + bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE); + } bio_put(new_bio); kfree(pr_ctx); diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 95c6d86ab5e8..c4ef1fceead6 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -115,6 +115,7 @@ struct mapped_device { struct srcu_struct io_barrier; }; +void disable_discard(struct mapped_device *md); void disable_write_same(struct mapped_device *md); void disable_write_zeroes(struct mapped_device *md); diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c index b53f30f16b4d..4b76f84424c3 100644 --- a/drivers/md/dm-init.c +++ b/drivers/md/dm-init.c @@ -36,7 +36,7 @@ struct dm_device { struct list_head list; }; -const char 
*dm_allowed_targets[] __initconst = { +const char * const dm_allowed_targets[] __initconst = { "crypt", "delay", "linear", diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index d57d997a52c8..7c678f50aaa3 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) { return range1->logical_sector < range2->logical_sector + range2->n_sectors && - range2->logical_sector + range2->n_sectors > range2->logical_sector; + range1->logical_sector + range1->n_sectors > range2->logical_sector; } static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) @@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity struct dm_integrity_range *last_range = list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); struct task_struct *last_range_task; - if (!ranges_overlap(range, last_range)) - break; last_range_task = last_range->task; list_del(&last_range->wait_entry); if (!add_new_range(ic, last_range, false)) { @@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) journal_watermark = val; else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) sync_msec = val; - else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) { + else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) { if (ic->meta_dev) { dm_put_device(ti, ic->meta_dev); ic->meta_dev = NULL; @@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } ic->sectors_per_block = val >> SECTOR_SHIFT; - } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { + } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, "Invalid internal_hash argument"); if (r) goto bad; - } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { + } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, "Invalid journal_crypt argument"); if (r) goto bad; - } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { + } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, "Invalid journal_mac argument"); if (r) @@ -3616,7 +3614,7 @@ static struct target_type integrity_target = { .io_hints = dm_integrity_io_hints, }; -int __init dm_integrity_init(void) +static int __init dm_integrity_init(void) { int r; @@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void) return r; } -void dm_integrity_exit(void) +static void __exit dm_integrity_exit(void) { dm_unregister_target(&integrity_target); kmem_cache_destroy(journal_io_cache); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 09773636602d..b66745bd08bb 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) } if (unlikely(error == BLK_STS_TARGET)) { - if (req_op(clone) == REQ_OP_WRITE_SAME && - !clone->q->limits.max_write_same_sectors) + if (req_op(clone) == REQ_OP_DISCARD && + !clone->q->limits.max_discard_sectors) + 
disable_discard(tio->md); + else if (req_op(clone) == REQ_OP_WRITE_SAME && + !clone->q->limits.max_write_same_sectors) disable_write_same(tio->md); - if (req_op(clone) == REQ_OP_WRITE_ZEROES && - !clone->q->limits.max_write_zeroes_sectors) + else if (req_op(clone) == REQ_OP_WRITE_ZEROES && + !clone->q->limits.max_write_zeroes_sectors) disable_write_zeroes(tio->md); } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ba9481f1bf3c..cde3b49b2a91 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t) return true; } +static int device_requires_stable_pages(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && bdi_cap_stable_pages_required(q->backing_dev_info); +} + +/* + * If any underlying device requires stable pages, a table must require + * them as well. Only targets that support iterate_devices are considered: + * don't want error, zero, etc to require stable pages. + */ +static bool dm_table_requires_stable_pages(struct dm_table *t) +{ + struct dm_target *ti; + unsigned i; + + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) + return true; + } + + return false; +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, dm_table_verify_integrity(t); /* + * Some devices don't use blk_integrity but still want stable pages + * because they do their own checksumming. + */ + if (dm_table_requires_stable_pages(t)) + q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; + else + q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; + + /* * Determine whether or not this queue's I/O timings contribute * to the entropy pool, Only request-based targets use this. 
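/*
 * Editor's note: simplified standalone sketch, not the dm code itself. It
 * mirrors the idea of dm_table_requires_stable_pages() above: walk every
 * underlying device and, if any of them needs stable pages (for example
 * because it checksums data in flight), the table as a whole must advertise
 * the same requirement. struct fake_dev and the array below are invented for
 * the demo.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	const char *name;
	bool stable_pages_required;
};

static bool table_requires_stable_pages(const struct fake_dev *devs, int n)
{
	for (int i = 0; i < n; i++)
		if (devs[i].stable_pages_required)
			return true;	/* one such device is enough */
	return false;
}

int main(void)
{
	struct fake_dev devs[] = {
		{ "sda",  false },
		{ "rbd0", true },	/* e.g. does its own checksumming */
	};

	printf("stable pages %s\n",
	       table_requires_stable_pages(devs, 2) ? "required" : "not required");
	return 0;
}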
* Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 68d24056d0b1..043f0761e4a0 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error) } } +void disable_discard(struct mapped_device *md) +{ + struct queue_limits *limits = dm_get_queue_limits(md); + + /* device doesn't really support DISCARD, disable it */ + limits->max_discard_sectors = 0; + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); +} + void disable_write_same(struct mapped_device *md) { struct queue_limits *limits = dm_get_queue_limits(md); @@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio) dm_endio_fn endio = tio->ti->type->end_io; if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { - if (bio_op(bio) == REQ_OP_WRITE_SAME && - !bio->bi_disk->queue->limits.max_write_same_sectors) + if (bio_op(bio) == REQ_OP_DISCARD && + !bio->bi_disk->queue->limits.max_discard_sectors) + disable_discard(md); + else if (bio_op(bio) == REQ_OP_WRITE_SAME && + !bio->bi_disk->queue->limits.max_write_same_sectors) disable_write_same(md); - if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !bio->bi_disk->queue->limits.max_write_zeroes_sectors) + else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && + !bio->bi_disk->queue->limits.max_write_zeroes_sectors) disable_write_zeroes(md); } @@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) return -EINVAL; } - /* - * BIO based queue uses its own splitting. When multipage bvecs - * is switched on, size of the incoming bio may be too big to - * be handled in some targets, such as crypt. - * - * When these targets are ready for the big bio, we can remove - * the limit. - */ - ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE); + ti->max_io_len = (uint32_t) len; return 0; } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 0ce2d8dfc5f1..26ad6468d13a 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1246,7 +1246,7 @@ config MFD_STA2X11 config MFD_SUN6I_PRCM bool "Allwinner A31 PRCM controller" - depends on ARCH_SUNXI + depends on ARCH_SUNXI || COMPILE_TEST select MFD_CORE help Support for the PRCM (Power/Reset/Clock Management) unit available diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index 69df27769c21..43ac71691fe4 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c @@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = { static const struct mfd_cell sprd_pmic_devs[] = { { .name = "sc27xx-wdt", - .of_compatible = "sprd,sc27xx-wdt", + .of_compatible = "sprd,sc2731-wdt", }, { .name = "sc27xx-rtc", - .of_compatible = "sprd,sc27xx-rtc", + .of_compatible = "sprd,sc2731-rtc", }, { .name = "sc27xx-charger", - .of_compatible = "sprd,sc27xx-charger", + .of_compatible = "sprd,sc2731-charger", }, { .name = "sc27xx-chg-timer", - .of_compatible = "sprd,sc27xx-chg-timer", + .of_compatible = "sprd,sc2731-chg-timer", }, { .name = "sc27xx-fast-chg", - .of_compatible = "sprd,sc27xx-fast-chg", + .of_compatible = "sprd,sc2731-fast-chg", }, { .name = "sc27xx-chg-wdt", - .of_compatible = "sprd,sc27xx-chg-wdt", + .of_compatible = "sprd,sc2731-chg-wdt", }, { .name = "sc27xx-typec", - .of_compatible = "sprd,sc27xx-typec", + .of_compatible = "sprd,sc2731-typec", }, { .name = "sc27xx-flash", - .of_compatible = "sprd,sc27xx-flash", + .of_compatible = "sprd,sc2731-flash", }, { .name = "sc27xx-eic", - .of_compatible = 
"sprd,sc27xx-eic", + .of_compatible = "sprd,sc2731-eic", }, { .name = "sc27xx-efuse", - .of_compatible = "sprd,sc27xx-efuse", + .of_compatible = "sprd,sc2731-efuse", }, { .name = "sc27xx-thermal", - .of_compatible = "sprd,sc27xx-thermal", + .of_compatible = "sprd,sc2731-thermal", }, { .name = "sc27xx-adc", - .of_compatible = "sprd,sc27xx-adc", + .of_compatible = "sprd,sc2731-adc", }, { .name = "sc27xx-audio-codec", - .of_compatible = "sprd,sc27xx-audio-codec", + .of_compatible = "sprd,sc2731-audio-codec", }, { .name = "sc27xx-regulator", - .of_compatible = "sprd,sc27xx-regulator", + .of_compatible = "sprd,sc2731-regulator", }, { .name = "sc27xx-vibrator", - .of_compatible = "sprd,sc27xx-vibrator", + .of_compatible = "sprd,sc2731-vibrator", }, { .name = "sc27xx-keypad-led", - .of_compatible = "sprd,sc27xx-keypad-led", + .of_compatible = "sprd,sc2731-keypad-led", }, { .name = "sc27xx-bltc", - .of_compatible = "sprd,sc27xx-bltc", + .of_compatible = "sprd,sc2731-bltc", }, { .name = "sc27xx-fgu", - .of_compatible = "sprd,sc27xx-fgu", + .of_compatible = "sprd,sc2731-fgu", }, { .name = "sc27xx-7sreset", - .of_compatible = "sprd,sc27xx-7sreset", + .of_compatible = "sprd,sc2731-7sreset", }, { .name = "sc27xx-poweroff", - .of_compatible = "sprd,sc27xx-poweroff", + .of_compatible = "sprd,sc2731-poweroff", }, { .name = "sc27xx-syscon", - .of_compatible = "sprd,sc27xx-syscon", + .of_compatible = "sprd,sc2731-syscon", }, }; diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 299016bc46d9..104477b512a2 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -1245,6 +1245,28 @@ free: return status; } +static int __maybe_unused twl_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (client->irq) + disable_irq(client->irq); + + return 0; +} + +static int __maybe_unused twl_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (client->irq) + enable_irq(client->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume); + static const struct i2c_device_id twl_ids[] = { { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ { "twl5030", 0 }, /* T2 updated */ @@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = { /* One Client Driver , 4 Clients */ static struct i2c_driver twl_driver = { .driver.name = DRIVER_NAME, + .driver.pm = &twl_dev_pm_ops, .id_table = twl_ids, .probe = twl_probe, .remove = twl_remove, diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 39f832d27288..36d0d5c9cfba 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1184,6 +1184,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev) struct fastrpc_session_ctx *sess; struct device *dev = &pdev->dev; int i, sessions = 0; + int rc; cctx = dev_get_drvdata(dev->parent); if (!cctx) @@ -1213,7 +1214,11 @@ static int fastrpc_cb_probe(struct platform_device *pdev) } cctx->sesscount++; spin_unlock(&cctx->lock); - dma_set_mask(dev, DMA_BIT_MASK(32)); + rc = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(dev, "32-bit DMA enable failed\n"); + return rc; + } return 0; } diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index ea979ebd62fb..3c509e19d69d 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -1688,12 +1688,11 @@ static void goya_init_golden_registers(struct hl_device *hdev) /* * Workaround for H2 #HW-23 bug - * Set DMA max outstanding read requests to 240 on DMA CH 1. 
Set it - * to 16 on KMD DMA - * We need to limit only these DMAs because the user can only read + * Set DMA max outstanding read requests to 240 on DMA CH 1. + * This limitation is still large enough to not affect Gen4 bandwidth. + * We need to only limit that DMA channel because the user can only read * from Host using DMA CH 1 */ - WREG32(mmDMA_CH_0_CFG0, 0x0fff0010); WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0); goya->hw_cap_initialized |= HW_CAP_GOLDEN; @@ -3693,7 +3692,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev, * WA for HW-23. * We can't allow user to read from Host using QMANs other than 1. */ - if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 && + if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 && hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr), le32_to_cpu(user_dma_pkt->tsize), hdev->asic_prop.va_space_host_start_address, diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c index 82a97866e0cf..7c8f203f9a24 100644 --- a/drivers/mmc/host/alcor.c +++ b/drivers/mmc/host/alcor.c @@ -48,7 +48,6 @@ struct alcor_sdmmc_host { struct mmc_command *cmd; struct mmc_data *data; unsigned int dma_on:1; - unsigned int early_data:1; struct mutex cmd_mutex; @@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host) host->sg_count--; } -static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host, - bool early) +static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host) { struct alcor_pci_priv *priv = host->alcor_pci; struct mmc_data *data = host->data; @@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host, ctrl |= AU6601_DATA_WRITE; if (data->host_cookie == COOKIE_MAPPED) { - if (host->early_data) { - host->early_data = false; - return; - } - - host->early_data = early; - alcor_data_set_dma(host); ctrl |= AU6601_DATA_DMA_MODE; host->dma_on = 1; @@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host) static void alcor_prepare_data(struct alcor_sdmmc_host *host, struct mmc_command *cmd) { + struct alcor_pci_priv *priv = host->alcor_pci; struct mmc_data *data = cmd->data; if (!data) @@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host, if (data->host_cookie != COOKIE_MAPPED) alcor_prepare_sg_miter(host); - alcor_trigger_data_transfer(host, true); + alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL); } static void alcor_send_cmd(struct alcor_sdmmc_host *host, @@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask) if (!host->data) return false; - alcor_trigger_data_transfer(host, false); + alcor_trigger_data_transfer(host); host->cmd = NULL; return true; } @@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask) if (!host->data) alcor_request_complete(host, 1); else - alcor_trigger_data_transfer(host, false); + alcor_trigger_data_transfer(host); host->cmd = NULL; } @@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask) break; case AU6601_INT_READ_BUF_RDY: alcor_trf_block_pio(host, true); - if (!host->blocks) - break; - alcor_trigger_data_transfer(host, false); return 1; case AU6601_INT_WRITE_BUF_RDY: alcor_trf_block_pio(host, false); - if (!host->blocks) - break; - alcor_trigger_data_transfer(host, false); return 1; case AU6601_INT_DMA_END: if (!host->sg_count) @@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask) break; } - if (intmask & AU6601_INT_DATA_END) - return 0; + if 
(intmask & AU6601_INT_DATA_END) { + if (!host->dma_on && host->blocks) { + alcor_trigger_data_transfer(host); + return 1; + } else { + return 0; + } + } return 1; } diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 5bbed477c9b1..9f20fff9781b 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask) sdhci_reset(host, mask); } +#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\ + SDHCI_INT_TIMEOUT) +#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE) + +static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + if (omap_host->is_tuning && host->cmd && !host->data_early && + (intmask & CMD_ERR_MASK)) { + + /* + * Since we are not resetting data lines during tuning + * operation, data error or data complete interrupts + * might still arrive. Mark this request as a failure + * but still wait for the data interrupt + */ + if (intmask & SDHCI_INT_TIMEOUT) + host->cmd->error = -ETIMEDOUT; + else + host->cmd->error = -EILSEQ; + + host->cmd = NULL; + + /* + * Sometimes command error interrupts and command complete + * interrupt will arrive together. Clear all command related + * interrupts here. + */ + sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS); + intmask &= ~CMD_MASK; + } + + return intmask; +} + static struct sdhci_ops sdhci_omap_ops = { .set_clock = sdhci_omap_set_clock, .set_power = sdhci_omap_set_power, @@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = { .platform_send_init_74_clocks = sdhci_omap_init_74_clocks, .reset = sdhci_omap_reset, .set_uhs_signaling = sdhci_omap_set_uhs_signaling, + .irq = sdhci_omap_irq, }; static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 72428b6bfc47..7b7286b4d81e 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, continue; } - if (time_after(jiffies, timeo) && !chip_ready(map, adr)) + /* + * We check "time_after" and "!chip_good" before checking "chip_good" to avoid + * the failure due to scheduling. 
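/*
 * Editor's note: standalone sketch, not part of the MTD patch above. It shows
 * the polling pattern the cfi_cmdset_0002 fix relies on: once the deadline has
 * passed, re-check the completion condition one last time before declaring a
 * timeout, so a thread that was scheduled out for a long stretch does not
 * report a spurious failure for an operation that actually finished.
 * time_now()/operation_done() are stand-ins for jiffies/chip_good().
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long time_now(void)	 { return (long)time(NULL); }
static bool operation_done(void) { return true; /* pretend it completed */ }

int main(void)
{
	long timeo = time_now() + 1;	/* one-second budget */
	bool ok = false;

	for (;;) {
		/* timed out *and* still not done: give up */
		if (time_now() > timeo && !operation_done())
			break;

		/* done (possibly noticed only after the deadline): success */
		if (operation_done()) {
			ok = true;
			break;
		}
	}

	printf("%s\n", ok ? "completed" : "timed out");
	return 0;
}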
+ */ + if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) break; if (chip_good(map, adr, datum)) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b59708c35faf..ee610721098e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this, return NOTIFY_DONE; if (event_dev->flags & IFF_MASTER) { + int ret; + netdev_dbg(event_dev, "IFF_MASTER\n"); - return bond_master_netdev_event(event, event_dev); + ret = bond_master_netdev_event(event, event_dev); + if (ret != NOTIFY_DONE) + return ret; } if (event_dev->flags & IFF_SLAVE) { diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 2f120b2ffef0..4985268e2273 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count); static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) { - return sprintf(buf, "%pM\n", slave->perm_hwaddr); + return sprintf(buf, "%*phC\n", + slave->dev->addr_len, + slave->perm_hwaddr); } static SLAVE_ATTR_RO(perm_hwaddr); diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index dce84a2a65c7..c44b2822e4dd 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return 0; lane = mv88e6390x_serdes_get_lane(chip, port); - if (lane < 0) + if (lane < 0 && lane != -ENODEV) return lane; - if (chip->ports[port].serdes_irq) { - err = mv88e6390_serdes_irq_disable(chip, port, lane); + if (lane >= 0) { + if (chip->ports[port].serdes_irq) { + err = mv88e6390_serdes_irq_disable(chip, port, lane); + if (err) + return err; + } + + err = mv88e6390x_serdes_power(chip, port, false); if (err) return err; } - err = mv88e6390x_serdes_power(chip, port, false); - if (err) - return err; + chip->ports[port].cmode = 0; if (cmode) { err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); @@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (err) return err; + chip->ports[port].cmode = cmode; + + lane = mv88e6390x_serdes_get_lane(chip, port); + if (lane < 0) + return lane; + err = mv88e6390x_serdes_power(chip, port, true); if (err) return err; @@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, } } - chip->ports[port].cmode = cmode; - return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index a9bdc21873d3..10ff37d6dc78 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) bnx2x_sample_bulletin(bp); if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { - BNX2X_ERR("Hypervisor will dicline the request, avoiding\n"); + BNX2X_ERR("Hypervisor will decline the request, avoiding\n"); rc = -EINVAL; goto out; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0bb9d7b3a2b6..4c586ba4364b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, tpa_info = &rxr->rx_tpa[agg_id]; if (unlikely(cons != 
rxr->rx_next_cons)) { + netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); return; } @@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } cons = rxcmp->rx_cmp_opaque; - rx_buf = &rxr->rx_buf_ring[cons]; - data = rx_buf->data; - data_ptr = rx_buf->data_ptr; if (unlikely(cons != rxr->rx_next_cons)) { int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp); + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); bnxt_sched_reset(bp, rxr); return rc1; } + rx_buf = &rxr->rx_buf_ring[cons]; + data = rx_buf->data; + data_ptr = rx_buf->data_ptr; prefetch(data_ptr); misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); @@ -1610,11 +1614,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rx_buf->data = NULL; if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { + u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); + bnxt_reuse_rx_data(rxr, cons, data); if (agg_bufs) bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs); rc = -EIO; + if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { + netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); + bnxt_sched_reset(bp, rxr); + } goto next_rx; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 328373e0578f..060a6f386104 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp) pci_set_power_state(tp->pdev, PCI_D3hot); } -static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) +static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex) { switch (val & MII_TG3_AUX_STAT_SPDMASK) { case MII_TG3_AUX_STAT_10HALF: @@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) bool current_link_up; u32 bmsr, val; u32 lcl_adv, rmt_adv; - u16 current_speed; + u32 current_speed; u8 current_duplex; int i, err; @@ -5719,7 +5719,7 @@ out: static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) { u32 orig_pause_cfg; - u16 orig_active_speed; + u32 orig_active_speed; u8 orig_active_duplex; u32 mac_status; bool current_link_up; @@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) { int err = 0; u32 bmsr, bmcr; - u16 current_speed = SPEED_UNKNOWN; + u32 current_speed = SPEED_UNKNOWN; u8 current_duplex = DUPLEX_UNKNOWN; bool current_link_up = false; u32 local_adv, remote_adv, sgsr; diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index a772a33b685c..6953d0546acb 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info { struct tg3_link_config { /* Describes what we're trying to get. 
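/*
 * Editor's note: standalone sketch, not taken from the tg3 patch itself. A
 * likely pitfall behind widening the speed fields from u16 to u32: the ethtool
 * SPEED_UNKNOWN constant is -1, so storing it in a 16-bit field truncates it
 * to 0xffff and later comparisons against SPEED_UNKNOWN quietly fail.
 * SPEED_UNKNOWN is redefined locally here just for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define SPEED_UNKNOWN (-1)	/* as defined in the uapi ethtool header */

int main(void)
{
	uint16_t speed16 = (uint16_t)SPEED_UNKNOWN;	/* what a u16 field ends up holding */
	uint32_t speed32 = SPEED_UNKNOWN;

	printf("u16 field: %s SPEED_UNKNOWN\n",
	       speed16 == SPEED_UNKNOWN ? "matches" : "does NOT match");
	printf("u32 field: %s SPEED_UNKNOWN\n",
	       speed32 == SPEED_UNKNOWN ? "matches" : "does NOT match");
	return 0;
}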
*/ u32 advertising; - u16 speed; + u32 speed; u8 duplex; u8 autoneg; u8 flowctrl; @@ -2882,7 +2882,7 @@ struct tg3_link_config { u8 active_flowctrl; u8 active_duplex; - u16 active_speed; + u32 active_speed; u32 rmt_adv; }; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 1522aee81884..3da2795e2486 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue) /* First, update TX stats if needed */ if (skb) { - if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP) && + gem_ptp_do_txstamp(queue, skb, desc) == 0) { /* skb now belongs to timestamp buffer * and will be removed later */ diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index aa2be4807191..c032bef1b776 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -32,6 +32,13 @@ #define DRV_NAME "nicvf" #define DRV_VERSION "1.0" +/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs + * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed + * this value, keeping headroom for the 14 byte Ethernet header and two + * VLAN tags (for QinQ) + */ +#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2) + /* Supported devices */ static const struct pci_device_id nicvf_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, @@ -1328,10 +1335,11 @@ int nicvf_stop(struct net_device *netdev) struct nicvf_cq_poll *cq_poll = NULL; union nic_mbx mbx = {}; - cancel_delayed_work_sync(&nic->link_change_work); - /* wait till all queued set_rx_mode tasks completes */ - drain_workqueue(nic->nicvf_rx_mode_wq); + if (nic->nicvf_rx_mode_wq) { + cancel_delayed_work_sync(&nic->link_change_work); + drain_workqueue(nic->nicvf_rx_mode_wq); + } mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; nicvf_send_msg_to_pf(nic, &mbx); @@ -1452,7 +1460,8 @@ int nicvf_open(struct net_device *netdev) struct nicvf_cq_poll *cq_poll = NULL; /* wait till all queued set_rx_mode tasks completes if any */ - drain_workqueue(nic->nicvf_rx_mode_wq); + if (nic->nicvf_rx_mode_wq) + drain_workqueue(nic->nicvf_rx_mode_wq); netif_carrier_off(netdev); @@ -1550,10 +1559,12 @@ int nicvf_open(struct net_device *netdev) /* Send VF config done msg to PF */ nicvf_send_cfg_done(nic); - INIT_DELAYED_WORK(&nic->link_change_work, - nicvf_link_status_check_task); - queue_delayed_work(nic->nicvf_rx_mode_wq, - &nic->link_change_work, 0); + if (nic->nicvf_rx_mode_wq) { + INIT_DELAYED_WORK(&nic->link_change_work, + nicvf_link_status_check_task); + queue_delayed_work(nic->nicvf_rx_mode_wq, + &nic->link_change_work, 0); + } return 0; cleanup: @@ -1578,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) struct nicvf *nic = netdev_priv(netdev); int orig_mtu = netdev->mtu; + /* For now just support only the usual MTU sized frames, + * plus some headroom for VLAN, QinQ. 
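/*
 * Editor's note: illustrative arithmetic, not part of the nicvf patch above.
 * The new MAX_XDP_MTU bound simply subtracts the Ethernet header and two VLAN
 * tags (QinQ) from the 1530-byte limit above which the hardware splits frames
 * across pages, which XDP cannot handle: 1530 - 14 - 2 * 4 = 1508 bytes.
 */
#include <stdio.h>

#define ETH_HLEN  14	/* Ethernet header, as in <linux/if_ether.h> */
#define VLAN_HLEN  4	/* one 802.1Q tag, as in <linux/if_vlan.h>   */

#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)

int main(void)
{
	int new_mtu = 1500;

	printf("MAX_XDP_MTU = %d\n", MAX_XDP_MTU);	/* 1508 */
	printf("MTU %d with XDP attached: %s\n", new_mtu,
	       new_mtu > MAX_XDP_MTU ? "rejected" : "allowed");
	return 0;
}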
+ */ + if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) { + netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", + netdev->mtu); + return -EINVAL; + } + netdev->mtu = new_mtu; if (!netif_running(netdev)) @@ -1826,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) bool bpf_attached = false; int ret = 0; - /* For now just support only the usual MTU sized frames */ - if (prog && (dev->mtu > 1500)) { + /* For now just support only the usual MTU sized frames, + * plus some headroom for VLAN, QinQ. + */ + if (prog && dev->mtu > MAX_XDP_MTU) { netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", dev->mtu); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 5b4d3badcb73..e246f9733bb8 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic, /* Check if page can be recycled */ if (page) { ref_count = page_ref_count(page); - /* Check if this page has been used once i.e 'put_page' - * called after packet transmission i.e internal ref_count - * and page's ref_count are equal i.e page can be recycled. + /* This page can be recycled if internal ref_count and page's + * ref_count are equal, indicating that the page has been used + * once for packet transmission. For non-XDP mode, internal + * ref_count is always '1'. */ - if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) - pgcache->ref_count--; - else - page = NULL; - - /* In non-XDP mode, page's ref_count needs to be '1' for it - * to be recycled. - */ - if (!rbdr->is_xdp && (ref_count != 1)) + if (rbdr->is_xdp) { + if (ref_count == pgcache->ref_count) + pgcache->ref_count--; + else + page = NULL; + } else if (ref_count != 1) { page = NULL; + } } if (!page) { @@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) while (head < rbdr->pgcnt) { pgcache = &rbdr->pgcache[head]; if (pgcache->page && page_ref_count(pgcache->page) != 0) { - if (!rbdr->is_xdp) { - put_page(pgcache->page); - continue; + if (rbdr->is_xdp) { + page_ref_sub(pgcache->page, + pgcache->ref_count - 1); } - page_ref_sub(pgcache->page, pgcache->ref_count - 1); put_page(pgcache->page); } head++; diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c index 74849be5f004..e2919005ead3 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c @@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, ppmax = max; /* pool size must be multiple of unsigned long */ - bmap = BITS_TO_LONGS(ppmax); + bmap = ppmax / BITS_PER_TYPE(unsigned long); + if (!bmap) + return NULL; + ppmax = (bmap * sizeof(unsigned long)) << 3; alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; @@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, if (reserve_factor) { ppmax_pool = ppmax / reserve_factor; pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); + if (!pool) { + ppmax_pool = 0; + reserve_factor = 0; + } pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", ndev->name, ppmax, ppmax_pool, pool_index_max); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 697c2427f2b7..a96ad20ee484 100644 --- 
a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) int ret; if (enable) { - ret = clk_prepare_enable(fep->clk_ahb); - if (ret) - return ret; - ret = clk_prepare_enable(fep->clk_enet_out); if (ret) - goto failed_clk_enet_out; + return ret; if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) phy_reset_after_clk_enable(ndev->phydev); } else { - clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1885,8 +1880,6 @@ failed_clk_ref: failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); -failed_clk_enet_out: - clk_disable_unprepare(fep->clk_ahb); return ret; } @@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev) ret = clk_prepare_enable(fep->clk_ipg); if (ret) goto failed_clk_ipg; + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + goto failed_clk_ahb; fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { @@ -3563,6 +3559,9 @@ failed_reset: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); failed_regulator: + clk_disable_unprepare(fep->clk_ahb); +failed_clk_ahb: + clk_disable_unprepare(fep->clk_ipg); failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: @@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); return 0; @@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + int ret; - return clk_prepare_enable(fep->clk_ipg); + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + return ret; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + + return 0; + +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); + return ret; } static const struct dev_pm_ops fec_pm_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 79d03f8ee7b1..c7fa97a7e1f4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -150,7 +150,6 @@ out_buffer_fail: /* free desc along with its attached buffer */ static void hnae_free_desc(struct hnae_ring *ring) { - hnae_free_buffers(ring); dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, ring->desc_num * sizeof(ring->desc[0]), ring_to_dma_dir(ring)); @@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring) /* fini ring, also free the buffer for the ring */ static void hnae_fini_ring(struct hnae_ring *ring) { + if (is_rx_ring(ring)) + hnae_free_buffers(ring); + hnae_free_desc(ring); kfree(ring->desc_cb); ring->desc_cb = NULL; diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 08a750fb60c4..d6fb83437230 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -357,7 +357,7 @@ struct hnae_buf_ops { }; struct hnae_queue { - void __iomem *io_base; + u8 __iomem *io_base; phys_addr_t phy_base; struct hnae_ae_dev *dev; /* the device who use this queue */ struct hnae_ring rx_ring 
____cacheline_internodealigned_in_smp; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a97228c93831..6c0507921623 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn) static void hns_mac_param_get(struct mac_params *param, struct hns_mac_cb *mac_cb) { - param->vaddr = (void *)mac_cb->vaddr; + param->vaddr = mac_cb->vaddr; param->mac_mode = hns_get_enet_interface(mac_cb); ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); param->mac_id = mac_cb->mac_id; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index fbc75341bef7..22589799f1a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -187,7 +187,7 @@ struct mac_statistics { /*mac para struct ,mac get param from nic or dsaf when initialize*/ struct mac_params { char addr[ETH_ALEN]; - void *vaddr; /*virtual address*/ + u8 __iomem *vaddr; /*virtual address*/ struct device *dev; u8 mac_id; /**< Ethernet operation mode (MAC-PHY interface and speed) */ @@ -402,7 +402,7 @@ struct mac_driver { enum mac_mode mac_mode; u8 mac_id; struct hns_mac_cb *mac_cb; - void __iomem *io_base; + u8 __iomem *io_base; unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ unsigned int virt_dev_num; struct device *dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index ac55db065f16..61eea6ac846f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key( DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, DSAF_TBL_TCAM_KEY_PORT_S, port); - - mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan); } /** @@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry( /* default config dvc to 0 */ mac_data.tbl_ucast_dvc = 0; mac_data.tbl_ucast_out_port = mac_entry->port_num; - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); + tcam_data.tbl_tcam_data_high = mac_key.high.val; + tcam_data.tbl_tcam_data_low = mac_key.low.val; hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); @@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, 0xff, mc_mask); - mask_key.high.val = le32_to_cpu(mask_key.high.val); - mask_key.low.val = le32_to_cpu(mask_key.low.val); - pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); } @@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, dsaf_dev->ae_dev.name, mac_key.high.val, mac_key.low.val, entry_index); - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); + tcam_data.tbl_tcam_data_high = mac_key.high.val; + tcam_data.tbl_tcam_data_low = mac_key.low.val; /* config mc entry with mask */ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, @@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, /* config key mask */ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); - mask_key.high.val = le32_to_cpu(mask_key.high.val); - mask_key.low.val = le32_to_cpu(mask_key.low.val); - 
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); } @@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, soft_mac_entry += entry_index; soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; } else { /* not zero, just del port, update */ - tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); - tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); + tcam_data.tbl_tcam_data_high = mac_key.high.val; + tcam_data.tbl_tcam_data_low = mac_key.low.val; hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, @@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void) return DSAF_DUMP_REGS_NUM; } +static int hns_dsaf_get_port_id(u8 port) +{ + if (port < DSAF_SERVICE_NW_NUM) + return port; + + if (port >= DSAF_BASE_INNER_PORT_NUM) + return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; + + return -EINVAL; +} + static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) { struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; @@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) memset(&temp_key, 0x0, sizeof(temp_key)); mask_entry.addr[0] = 0x01; hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, - port, mask_entry.addr); + 0xf, mask_entry.addr); tbl_tcam_mcast.tbl_mcast_item_vld = 1; tbl_tcam_mcast.tbl_mcast_old_en = 0; - if (port < DSAF_SERVICE_NW_NUM) { - mskid = port; - } else if (port >= DSAF_BASE_INNER_PORT_NUM) { - mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM; - } else { + /* set MAC port to handle multicast */ + mskid = hns_dsaf_get_port_id(port); + if (mskid == -EINVAL) { dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", dsaf_dev->ae_dev.name, port, mask_key.high.val, mask_key.low.val); return; } + dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], + mskid % 32, 1); + /* set pool bit map to handle multicast */ + mskid = hns_dsaf_get_port_id(port_num); + if (mskid == -EINVAL) { + dev_err(dsaf_dev->dev, + "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n", + dsaf_dev->ae_dev.name, port_num, + mask_key.high.val, mask_key.low.val); + return; + } dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1); + memcpy(&temp_key, &mask_key, sizeof(mask_key)); hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, (struct dsaf_tbl_tcam_data *)(&mask_key), diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 0e1cd99831a6..76cc8887e1a8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id, u8 port_num); int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); +int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset); + #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 16294cd3c954..19b94879691f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) dsaf_set_field(origin, 1ull << 10, 10, en); dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); } else { - u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + + u8 __iomem *base_addr = mac_cb->serdes_vaddr + (mac_cb->mac_id <= 3 ? 
0x00280000 : 0x00200000); dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 3d07c8a7639d..17c019106e6e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, } } -static void __iomem * +static u8 __iomem * hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) { return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; @@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) dsaf_dev->ppe_common[comm_index] = NULL; } -static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, - int ppe_idx) +static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, + int ppe_idx) { return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index f670e63a5a01..110c6e8222c7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -80,7 +80,7 @@ struct hns_ppe_cb { struct hns_ppe_hw_stats hw_stats; u8 index; /* index in a ppe common device */ - void __iomem *io_base; + u8 __iomem *io_base; int virq; u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ @@ -89,7 +89,7 @@ struct hns_ppe_cb { struct ppe_common_cb { struct device *dev; struct dsaf_device *dsaf_dev; - void __iomem *io_base; + u8 __iomem *io_base; enum ppe_common_mode ppe_mode; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6bf346c11b25..ac3518ca4d7b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; } else { ring = &q->tx_ring; - ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + + ring->io_base = ring_pair_cb->q.io_base + HNS_RCB_TX_REG_OFFSET; irq_idx = HNS_RCB_IRQ_IDX_TX; mdnum_ppkt = is_ver1 ? 
HNS_RCB_RING_MAX_TXBD_PER_PKT : @@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) } } -static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) +static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) { struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index b9733b0b8482..b9e7f11f0896 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -1018,7 +1018,7 @@ #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 -static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) +static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value) { writel(value, base + reg); } @@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val) #define dsaf_set_bit(origin, shift, val) \ dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) -static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, +static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift, u32 val) { u32 origin = dsaf_read_reg(base, reg); @@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, #define dsaf_get_bit(origin, shift) \ dsaf_get_field((origin), (1ull << (shift)), (shift)) -static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, +static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift) { u32 origin; @@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) #define dsaf_write_b(addr, data)\ - writeb((data), (__iomem unsigned char *)(addr)) + writeb((data), (__iomem u8 *)(addr)) #define dsaf_read_b(addr)\ - readb((__iomem unsigned char *)(addr)) + readb((__iomem u8 *)(addr)) #define hns_mac_reg_read64(drv, offset) \ - readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset)))) + readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset)))) #endif /* _DSAF_REG_H */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index ba4316910dea..a60f207768fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); - dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); + dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); } /** diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 60e7d7ae3787..4cd86ba1f050 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -29,9 +29,6 @@ #define SERVICE_TIMER_HZ (1 * HZ) -#define NIC_TX_CLEAN_MAX_NUM 256 -#define NIC_RX_CLEAN_MAX_NUM 64 - #define RCB_IRQ_NOT_INITED 0 #define RCB_IRQ_INITED 1 #define HNS_BUFFER_SIZE_2048 2048 @@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, wmb(); /* commit all data before submit */ assert(skb->queue_mapping < priv->ae_handle->q_num); 
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); - ring->stats.tx_pkts++; - ring->stats.tx_bytes += skb->len; return NETDEV_TX_OK; @@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, /* issue prefetch for next Tx descriptor */ prefetch(&ring->desc_cb[ring->next_to_clean]); } + /* update tx ring statistics. */ + ring->stats.tx_pkts += pkts; + ring->stats.tx_bytes += bytes; NETIF_TX_UNLOCK(ring); @@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) hns_nic_tx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, - hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); + hns_nic_common_poll, NAPI_POLL_WEIGHT); rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; } for (i = h->q_num; i < h->q_num * 2; i++) { @@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv) hns_nic_rx_fini_pro_v2; netif_napi_add(priv->netdev, &rd->napi, - hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); + hns_nic_common_poll, NAPI_POLL_WEIGHT); rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index fffe8c1c45d3..0fb61d440d3b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -3,7 +3,7 @@ # Makefile for the HISILICON network device drivers. # -ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 +ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index fb93bbd35845..6193f8fa7cf3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile @@ -3,7 +3,7 @@ # Makefile for the HISILICON network device drivers. # -ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 +ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index baf5cc251f32..8b8a7d00e8e0 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -39,7 +39,7 @@ struct hns_mdio_sc_reg { }; struct hns_mdio_device { - void *vbase; /* mdio reg base address */ + u8 __iomem *vbase; /* mdio reg base address */ struct regmap *subctrl_vbase; struct hns_mdio_sc_reg sc_reg; }; @@ -96,21 +96,17 @@ enum mdio_c45_op_seq { #define MDIO_SC_CLK_ST 0x531C #define MDIO_SC_RESET_ST 0x5A1C -static void mdio_write_reg(void *base, u32 reg, u32 value) +static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value) { - u8 __iomem *reg_addr = (u8 __iomem *)base; - - writel_relaxed(value, reg_addr + reg); + writel_relaxed(value, base + reg); } #define MDIO_WRITE_REG(a, reg, value) \ mdio_write_reg((a)->vbase, (reg), (value)) -static u32 mdio_read_reg(void *base, u32 reg) +static u32 mdio_read_reg(u8 __iomem *base, u32 reg) { - u8 __iomem *reg_addr = (u8 __iomem *)base; - - return readl_relaxed(reg_addr + reg); + return readl_relaxed(base + reg); } #define mdio_set_field(origin, mask, shift, val) \ @@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg) #define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) -static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, +static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift, u32 val) { u32 origin = mdio_read_reg(base, reg); @@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, #define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) -static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) +static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift) { u32 origin; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5ecbb1adcf3b..3dfb2d131eb7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, */ adapter->state = VNIC_PROBED; + reinit_completion(&adapter->init_done); rc = init_crq_queue(adapter); if (rc) { netdev_err(adapter->netdev, @@ -3761,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; + netdev_features_t old_hw_features = 0; union ibmvnic_crq crq; int i; @@ -3836,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) adapter->ip_offload_ctrl.large_rx_ipv4 = 0; adapter->ip_offload_ctrl.large_rx_ipv6 = 0; - adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; + if (adapter->state != VNIC_PROBING) { + old_hw_features = adapter->netdev->hw_features; + adapter->netdev->hw_features = 0; + } + + adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) - adapter->netdev->features |= NETIF_F_IP_CSUM; + adapter->netdev->hw_features |= NETIF_F_IP_CSUM; if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) - adapter->netdev->features |= NETIF_F_IPV6_CSUM; + adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; if ((adapter->netdev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) - adapter->netdev->features |= NETIF_F_RXCSUM; + adapter->netdev->hw_features |= 
NETIF_F_RXCSUM; if (buf->large_tx_ipv4) - adapter->netdev->features |= NETIF_F_TSO; + adapter->netdev->hw_features |= NETIF_F_TSO; if (buf->large_tx_ipv6) - adapter->netdev->features |= NETIF_F_TSO6; + adapter->netdev->hw_features |= NETIF_F_TSO6; + + if (adapter->state == VNIC_PROBING) { + adapter->netdev->features |= adapter->netdev->hw_features; + } else if (old_hw_features != adapter->netdev->hw_features) { + netdev_features_t tmp = 0; - adapter->netdev->hw_features |= adapter->netdev->features; + /* disable features no longer supported */ + adapter->netdev->features &= adapter->netdev->hw_features; + /* turn on features now supported if previously enabled */ + tmp = (old_hw_features ^ adapter->netdev->hw_features) & + adapter->netdev->hw_features; + adapter->netdev->features |= + tmp & adapter->netdev->wanted_features; + } memset(&crq, 0, sizeof(crq)); crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; @@ -4625,7 +4644,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) old_num_rx_queues = adapter->req_rx_queues; old_num_tx_queues = adapter->req_tx_queues; - init_completion(&adapter->init_done); + reinit_completion(&adapter->init_done); adapter->init_done_rc = 0; ibmvnic_send_crq_init(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { @@ -4680,7 +4699,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) adapter->from_passive_init = false; - init_completion(&adapter->init_done); adapter->init_done_rc = 0; ibmvnic_send_crq_init(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { @@ -4759,6 +4777,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); INIT_LIST_HEAD(&adapter->rwi_list); spin_lock_init(&adapter->rwi_lock); + init_completion(&adapter->init_done); adapter->resetting = false; adapter->mac_change_pending = false; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5a0419421511..ecef949f3baa 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -41,6 +41,8 @@ static int __init fm10k_init_module(void) /* create driver workqueue */ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, fm10k_driver_name); + if (!fm10k_workqueue) + return -ENOMEM; fm10k_dbg_init(); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d684998ba2b0..d3cc3427caad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -790,6 +790,8 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); + + unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) return !!vsi->xdp_prog; } -static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) -{ - bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); - int qid = ring->queue_index; - - if (ring_is_xdp(ring)) - qid -= ring->vsi->alloc_queue_pairs; - - if (!xdp_on) - return NULL; - - return xdp_get_umem_from_qid(ring->vsi->netdev, qid); -} - int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c 
b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4c885801fa26..7874d0ec7fb0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; /* only magic packet is supported */ - if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) - | (wol->wolopts != WAKE_FILTER)) + if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; /* is this a new value? */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index da62218eb70a..b1c265012c8a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) } /** + * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled + * @ring: The Tx or Rx ring + * + * Returns the UMEM or NULL. + **/ +static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) +{ + bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); + int qid = ring->queue_index; + + if (ring_is_xdp(ring)) + qid -= ring->vsi->alloc_queue_pairs; + + if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) + return NULL; + + return xdp_get_umem_from_qid(ring->vsi->netdev, qid); +} + +/** * i40e_configure_tx_ring - Configure a transmit ring context and rest * @ring: The Tx ring to configure * @@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) hash_init(vsi->mac_filter_hash); vsi->irqs_ready = false; + if (type == I40E_VSI_MAIN) { + vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); + if (!vsi->af_xdp_zc_qps) + goto err_rings; + } + ret = i40e_set_num_rings_in_vsi(vsi); if (ret) goto err_rings; @@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) goto unlock_pf; err_rings: + bitmap_free(vsi->af_xdp_zc_qps); pf->next_vsi = i - 1; kfree(vsi); unlock_pf: @@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); + bitmap_free(vsi->af_xdp_zc_qps); i40e_vsi_free_arrays(vsi, true); i40e_clear_rss_config_user(vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 5fb4353c742b..31575c0bb884 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now; + struct timespec64 now, then; + then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock); i40e_ptp_read(pf, &now, NULL); - timespec64_add_ns(&now, delta); + now = timespec64_add(now, then); i40e_ptp_write(pf, (const struct timespec64 *)&now); mutex_unlock(&pf->tmreg_lock); diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index b5c182e688e3..1b17486543ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, if (err) return err; + set_bit(qid, vsi->af_xdp_zc_qps); + if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); if 
(if_running) { @@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) return err; } + clear_bit(qid, vsi->af_xdp_zc_qps); i40e_xsk_umem_dma_unmap(vsi, umem); if (if_running) { diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 01fcfc6f3415..d2e2c50ce257 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -194,6 +194,8 @@ /* enable link status from external LINK_0 and LINK_1 pins */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ #define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ #define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ #define E1000_CTRL_RST 0x04000000 /* Global reset */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 69b230c53fed..3269d8e94744 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, struct e1000_hw *hw = &adapter->hw; u32 ctrl, rctl, status; u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif + bool wake; rtnl_lock(); netif_device_detach(netdev); @@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, igb_clear_interrupt_scheme(adapter); rtnl_unlock(); -#ifdef CONFIG_PM - if (!runtime) { - retval = pci_save_state(pdev); - if (retval) - return retval; - } -#endif - status = rd32(E1000_STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, } ctrl = rd32(E1000_CTRL); - /* advertise wake from D3Cold */ - #define E1000_CTRL_ADVD3WUC 0x00100000 - /* phy power management enable */ - #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 ctrl |= E1000_CTRL_ADVD3WUC; wr32(E1000_CTRL, ctrl); @@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, wr32(E1000_WUFC, 0); } - *enable_wake = wufc || adapter->en_mng_pt; - if (!*enable_wake) + wake = wufc || adapter->en_mng_pt; + if (!wake) igb_power_down_link(adapter); else igb_power_up_link(adapter); + if (enable_wake) + *enable_wake = wake; + /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. 
*/ @@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev) static int __maybe_unused igb_suspend(struct device *dev) { - int retval; - bool wake; - struct pci_dev *pdev = to_pci_dev(dev); - - retval = __igb_shutdown(pdev, &wake, 0); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; + return __igb_shutdown(to_pci_dev(dev), NULL, 0); } static int __maybe_unused igb_resume(struct device *dev) @@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev) static int __maybe_unused igb_runtime_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - int retval; - bool wake; - - retval = __igb_shutdown(pdev, &wake, 1); - if (retval) - return retval; - - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; + return __igb_shutdown(to_pci_dev(dev), NULL, 1); } static int __maybe_unused igb_runtime_resume(struct device *dev) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index cc4907f9ff02..2fb97967961c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) struct pci_dev *pdev = adapter->pdev; struct device *dev = &adapter->netdev->dev; struct mii_bus *bus; + int err = -ENODEV; - adapter->mii_bus = devm_mdiobus_alloc(dev); - if (!adapter->mii_bus) + bus = devm_mdiobus_alloc(dev); + if (!bus) return -ENOMEM; - bus = adapter->mii_bus; - switch (hw->device_id) { /* C3000 SoCs */ case IXGBE_DEV_ID_X550EM_A_KR: @@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) */ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; - return mdiobus_register(bus); + err = mdiobus_register(bus); + if (!err) { + adapter->mii_bus = bus; + return 0; + } ixgbe_no_mii_bus: devm_mdiobus_free(dev, bus); - adapter->mii_bus = NULL; - return -ENODEV; + return err; } /** diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 71c65cc17904..d3eaf2ceaa39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs); * switching channels */ typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); +int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv); int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, struct mlx5e_channels *new_chs, mlx5e_fp_hw_modify hw_modify); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 122927f3a600..d5e5afbdca6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, if (!eproto) return -EINVAL; - if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet)) - return -EOPNOTSUPP; - err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index eac245a93f91..4ab0d030b544 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -122,7 +122,9 @@ out: return err; } -/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ +/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) + * minimum speed value is 40Gbps + */ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) { u32 speed; @@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) int err; err = mlx5e_port_linkspeed(priv->mdev, &speed); - if (err) { - mlx5_core_warn(priv->mdev, "cannot get port speed\n"); - return 0; - } + if (err) + speed = SPEED_40000; + speed = max_t(u32, speed, SPEED_40000); xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; @@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) } static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, - u32 xoff, unsigned int mtu) + u32 xoff, unsigned int max_mtu) { int i; @@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, } if (port_buffer->buffer[i].size < - (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) + (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) return -ENOMEM; port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; - port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; + port_buffer->buffer[i].xon = + port_buffer->buffer[i].xoff - max_mtu; } return 0; @@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, /** * update_buffer_lossy() - * mtu: device's MTU + * max_mtu: netdev's max_mtu * pfc_en: <input> current pfc configuration * buffer: <input> current prio to buffer mapping * xoff: <input> xoff value @@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * Return 0 if no error. * Set change to true if buffer configuration is modified. 
*/ -static int update_buffer_lossy(unsigned int mtu, +static int update_buffer_lossy(unsigned int max_mtu, u8 pfc_en, u8 *buffer, u32 xoff, struct mlx5e_port_buffer *port_buffer, bool *change) @@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu, } if (changed) { - err = update_xoff_threshold(port_buffer, xoff, mtu); + err = update_xoff_threshold(port_buffer, xoff, max_mtu); if (err) return err; @@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu, return 0; } +#define MINIMUM_MAX_MTU 9216 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 change, unsigned int mtu, struct ieee_pfc *pfc, @@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, bool update_prio2buffer = false; u8 buffer[MLX5E_MAX_PRIORITY]; bool update_buffer = false; + unsigned int max_mtu; u32 total_used = 0; u8 curr_pfc_en; int err; int i; mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); + max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU); err = mlx5e_port_query_buffer(priv, &port_buffer); if (err) @@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu); if (err) return err; } @@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, + err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, &port_buffer, &update_buffer); if (err) return err; @@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, - &port_buffer, &update_buffer); + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, + xoff, &port_buffer, &update_buffer); if (err) return err; } @@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, return -EINVAL; update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu); if (err) return err; } @@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, /* Need to update buffer configuration if xoff value is changed */ if (!update_buffer && xoff != priv->dcbx.xoff) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 9d38e62cdf24..476dd97f7f2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx) static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) { - int err; + int err = 0; rtnl_lock(); mutex_lock(&priv->state_lock); - mlx5e_close_locked(priv->netdev); - err = mlx5e_open_locked(priv->netdev); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto out; + + err = mlx5e_safe_reopen_channels(priv); + +out: mutex_unlock(&priv->state_lock); rtnl_unlock(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index fa2a3c444cdc..eec07b34b4ad 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + if (!(mlx5e_eswitch_rep(*out_dev) && + mlx5e_is_uplink_rep(netdev_priv(*out_dev)))) + return -EOPNOTSUPP; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 3078491cc0d0..1539cf3de5dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, if (err) return err; + mutex_lock(&mdev->mlx5e_res.td.list_lock); list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); + mutex_unlock(&mdev->mlx5e_res.td.list_lock); return 0; } @@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev, void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir) { + mutex_lock(&mdev->mlx5e_res.td.list_lock); mlx5_core_destroy_tir(mdev, tir->tirn); list_del(&tir->list); + mutex_unlock(&mdev->mlx5e_res.td.list_lock); } static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, @@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) } INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); + mutex_init(&mdev->mlx5e_res.td.list_lock); return 0; @@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_tir *tir; - int err = -ENOMEM; + int err = 0; u32 tirn = 0; int inlen; void *in; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + if (!in) { + err = -ENOMEM; goto out; + } if (enable_uc_lb) MLX5_SET(modify_tir_in, in, ctx.self_lb_block, @@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); + mutex_lock(&mdev->mlx5e_res.td.list_lock); list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { tirn = tir->tirn; err = mlx5_core_modify_tir(mdev, tirn, in, inlen); @@ -168,6 +176,7 @@ out: kvfree(in); if (err) netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); + mutex_unlock(&mdev->mlx5e_res.td.list_lock); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a0987cc5fe4a..76a3d01a489e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, - unsigned long *advertising_modes, - u32 eth_proto_cap) +static void ptys2ethtool_adver_link(unsigned long *advertising_modes, + u32 eth_proto_cap, bool ext) { unsigned long proto_cap = eth_proto_cap; struct ptys2ethtool_config *table; u32 max_size; int proto; - mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); + table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; + max_size = ext ? 
ARRAY_SIZE(ptys2ext_ethtool_table) : + ARRAY_SIZE(ptys2legacy_ethtool_table); + for_each_set_bit(proto, &proto_cap, max_size) bitmap_or(advertising_modes, advertising_modes, table[proto].advertised, @@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap, ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); } -static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, - u8 tx_pause, u8 rx_pause, - struct ethtool_link_ksettings *link_ksettings) +static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause, + struct ethtool_link_ksettings *link_ksettings, + bool ext) { unsigned long *advertising = link_ksettings->link_modes.advertising; - ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); + ptys2ethtool_adver_link(advertising, eth_proto_cap, ext); if (rx_pause) ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); @@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, struct ethtool_link_ksettings *link_ksettings) { unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; + bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); - ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); + ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); } int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, @@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, u8 an_disable_admin; u8 an_status; u8 connector_type; + bool admin_ext; bool ext; int err; @@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, eth_proto_capability); eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_admin); + /* Fields: eth_proto_admin and ext_eth_proto_admin are + * mutually exclusive. Hence try reading legacy advertising + * when extended advertising is zero. + * admin_ext indicates how eth_proto_admin should be + * interpreted + */ + admin_ext = ext; + if (ext && !eth_proto_admin) { + eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false, + eth_proto_admin); + admin_ext = false; + } + eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); @@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); get_supported(mdev, eth_proto_cap, link_ksettings); - get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); + get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, + admin_ext); get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; @@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) - ext_requested = (link_ksettings->link_modes.advertising[0] > - MLX5E_PTYS_EXT); + ext_requested = !!(link_ksettings->link_modes.advertising[0] > + MLX5E_PTYS_EXT || + link_ksettings->link_modes.advertising[1]); ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); - - /*when ptys_extended_ethernet is set legacy link modes are deprecated */ - if (ext_requested != ext_supported) - return -EPROTONOSUPPORT; + ext_requested &= ext_supported; speed = link_ksettings->base.speed; ethtool2ptys_adver_func = ext_requested ? 
mlx5e_ethtool2ptys_ext_adver_link : mlx5e_ethtool2ptys_adver_link; - err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); + err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto); if (err) { netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", __func__, err); @@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, if (!an_changes && link_modes == eproto.admin) goto out; - mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); + mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested); mlx5_toggle_port_link(mdev); out: @@ -1752,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) struct mlx5e_channel *c; int i; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || + priv->channels.params.xdp_prog) return 0; for (i = 0; i < channels->num; i++) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b5fdbd3190d9..f7eb521db580 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (params->rx_dim_enabled) __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); - if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) + /* We disable csum_complete when XDP is enabled since + * XDP programs might manipulate packets which will render + * skb->checksum incorrect. + */ + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp) __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); return 0; @@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, return 0; } +int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) +{ + struct mlx5e_channels new_channels = {}; + + new_channels.params = priv->channels.params; + return mlx5e_safe_switch_channels(priv, &new_channels, NULL); +} + void mlx5e_timestamp_init(struct mlx5e_priv *priv) { priv->tstamp.tx_type = HWTSTAMP_TX_OFF; @@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) if (!report_failed) goto unlock; - mlx5e_close_locked(priv->netdev); - err = mlx5e_open_locked(priv->netdev); + err = mlx5e_safe_reopen_channels(priv); if (err) netdev_err(priv->netdev, - "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", + "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n", err); unlock: @@ -4553,7 +4564,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, { enum mlx5e_traffic_types tt; - rss_params->hfunc = ETH_RSS_HASH_XOR; + rss_params->hfunc = ETH_RSS_HASH_TOP; netdev_rss_key_fill(rss_params->toeplitz_hash_key, sizeof(rss_params->toeplitz_hash_key)); mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3dde5c7e0739..c3b3002ff62f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, { *proto = ((struct ethhdr *)skb->data)->h_proto; *proto = __vlan_get_protocol(skb, *proto, network_depth); - return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); + + if (*proto == htons(ETH_P_IP)) + return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr)); + + if (*proto == 
htons(ETH_P_IPV6)) + return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr)); + + return false; } static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) @@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) rq->stats->ecn_mark += !!rc; } -static u32 mlx5e_get_fcs(const struct sk_buff *skb) -{ - const void *fcs_bytes; - u32 _fcs_bytes; - - fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN, - ETH_FCS_LEN, &_fcs_bytes); - - return __get_unaligned_cpu32(fcs_bytes); -} - static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) { void *ip_p = skb->data + network_depth; @@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) +#define MAX_PADDING 8 + +static void +tail_padding_csum_slow(struct sk_buff *skb, int offset, int len, + struct mlx5e_rq_stats *stats) +{ + stats->csum_complete_tail_slow++; + skb->csum = csum_block_add(skb->csum, + skb_checksum(skb, offset, len, 0), + offset); +} + +static void +tail_padding_csum(struct sk_buff *skb, int offset, + struct mlx5e_rq_stats *stats) +{ + u8 tail_padding[MAX_PADDING]; + int len = skb->len - offset; + void *tail; + + if (unlikely(len > MAX_PADDING)) { + tail_padding_csum_slow(skb, offset, len, stats); + return; + } + + tail = skb_header_pointer(skb, offset, len, tail_padding); + if (unlikely(!tail)) { + tail_padding_csum_slow(skb, offset, len, stats); + return; + } + + stats->csum_complete_tail++; + skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset); +} + +static void +mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, + struct mlx5e_rq_stats *stats) +{ + struct ipv6hdr *ip6; + struct iphdr *ip4; + int pkt_len; + + switch (proto) { + case htons(ETH_P_IP): + ip4 = (struct iphdr *)(skb->data + network_depth); + pkt_len = network_depth + ntohs(ip4->tot_len); + break; + case htons(ETH_P_IPV6): + ip6 = (struct ipv6hdr *)(skb->data + network_depth); + pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len); + break; + default: + return; + } + + if (likely(pkt_len >= skb->len)) + return; + + tail_padding_csum(skb, pkt_len, stats); +} + static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, @@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) + /* True when explicitly set via priv flag, or XDP prog is loaded */ + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) goto csum_unnecessary; /* CQE csum doesn't cover padding octets in short ethernet @@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, skb->csum = csum_partial(skb->data + ETH_HLEN, network_depth - ETH_HLEN, skb->csum); - if (unlikely(netdev->features & NETIF_F_RXFCS)) - skb->csum = csum_block_add(skb->csum, - (__force __wsum)mlx5e_get_fcs(skb), - skb->len - ETH_FCS_LEN); + + mlx5e_skb_padding_csum(skb, network_depth, proto, stats); stats->csum_complete++; return; } csum_unnecessary: if (likely((cqe->hds_ip_ext & CQE_L3_OK) && - ((cqe->hds_ip_ext & CQE_L4_OK) || - (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) { + (cqe->hds_ip_ext & CQE_L4_OK))) { skb->ip_summed = CHECKSUM_UNNECESSARY; if (cqe_is_tunneled(cqe)) { skb->csum_level = 1; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1a78e05cbba8..b75aa8b8bf04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, @@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_complete += rq_stats->csum_complete; + s->rx_csum_complete_tail += rq_stats->csum_complete_tail; + s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; s->rx_csum_unnecessary += rq_stats->csum_unnecessary; s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_xdp_drop += rq_stats->xdp_drop; @@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 4640d4f986f8..16c3b785f282 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -71,6 +71,8 @@ struct mlx5e_sw_stats { u64 rx_csum_unnecessary; u64 rx_csum_none; u64 rx_csum_complete; + u64 rx_csum_complete_tail; + u64 rx_csum_complete_tail_slow; u64 rx_csum_unnecessary_inner; u64 rx_xdp_drop; u64 rx_xdp_redirect; @@ -181,6 +183,8 @@ struct mlx5e_rq_stats { u64 packets; u64 bytes; u64 csum_complete; + u64 csum_complete_tail; + u64 csum_complete_tail_slow; u64 csum_unnecessary; u64 csum_unnecessary_inner; u64 csum_none; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b4967a0ff8c7..d75dc44eb2ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, return true; } +struct ip_ttl_word { + __u8 ttl; + __u8 protocol; + __sum16 check; +}; + +struct ipv6_hoplimit_word { + __be16 payload_len; + __u8 nexthdr; + __u8 hop_limit; +}; + +static bool is_action_keys_supported(const struct flow_action_entry *act) +{ + u32 mask, offset; + u8 htype; + + htype = act->mangle.htype; + offset = act->mangle.offset; + mask = ~act->mangle.mask; + /* For IPv4 & IPv6 header check 4 byte word, + * to determine that modified fields + * are NOT ttl & hop_limit only. 
+ */ + if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) { + struct ip_ttl_word *ttl_word = + (struct ip_ttl_word *)&mask; + + if (offset != offsetof(struct iphdr, ttl) || + ttl_word->protocol || + ttl_word->check) { + return true; + } + } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { + struct ipv6_hoplimit_word *hoplimit_word = + (struct ipv6_hoplimit_word *)&mask; + + if (offset != offsetof(struct ipv6hdr, payload_len) || + hoplimit_word->payload_len || + hoplimit_word->nexthdr) { + return true; + } + } + return false; +} + static bool modify_header_match_supported(struct mlx5_flow_spec *spec, struct flow_action *flow_action, u32 actions, @@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, { const struct flow_action_entry *act; bool modify_ip_header; - u8 htype, ip_proto; void *headers_v; u16 ethertype; + u8 ip_proto; int i; if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) @@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, act->id != FLOW_ACTION_ADD) continue; - htype = act->mangle.htype; - if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 || - htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { + if (is_action_keys_supported(act)) { modify_ip_header = true; break; } @@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, return 0; } -static inline int cmp_encap_info(struct ip_tunnel_key *a, - struct ip_tunnel_key *b) +struct encap_key { + struct ip_tunnel_key *ip_tun_key; + int tunnel_type; +}; + +static inline int cmp_encap_info(struct encap_key *a, + struct encap_key *b) { - return memcmp(a, b, sizeof(*a)); + return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || + a->tunnel_type != b->tunnel_type; } -static inline int hash_encap_info(struct ip_tunnel_key *key) +static inline int hash_encap_info(struct encap_key *key) { - return jhash(key, sizeof(*key), 0); + return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), + key->tunnel_type); } @@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5e_tc_flow_parse_attr *parse_attr; struct ip_tunnel_info *tun_info; - struct ip_tunnel_key *key; + struct encap_key key, e_key; struct mlx5e_encap_entry *e; unsigned short family; uintptr_t hash_key; @@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, parse_attr = attr->parse_attr; tun_info = &parse_attr->tun_info[out_index]; family = ip_tunnel_info_af(tun_info); - key = &tun_info->key; + key.ip_tun_key = &tun_info->key; + key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev); - hash_key = hash_encap_info(key); + hash_key = hash_encap_info(&key); hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, encap_hlist, hash_key) { - if (!cmp_encap_info(&e->tun_info.key, key)) { + e_key.ip_tun_key = &e->tun_info.key; + e_key.tunnel_type = e->tunnel_type; + if (!cmp_encap_info(&e_key, &key)) { found = true; break; } @@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { - err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, + err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB, parse_attr, hdrs, extack); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index ecd2c747f726..8a67fd197b79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context); @@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(modify_esw_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); + MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } @@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw) { int err; + memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb)); + err = esw_create_legacy_vepa_table(esw); if (err) return err; @@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, /* Star rule to forward all traffic to uplink vport */ memset(spec, 0, sizeof(*spec)); + memset(&dest, 0, sizeof(dest)); dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport.num = MLX5_VPORT_UPLINK; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f2260391be5b..9b2d78ee22b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) { int err; + memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); err = esw_create_offloads_fdb_tables(esw, nvports); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c index 5cf5f2a9d51f..22a2ef111514 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, return ret; } -static void mlx5_fpga_tls_release_swid(struct idr *idr, - spinlock_t *idr_spinlock, u32 swid) +static void *mlx5_fpga_tls_release_swid(struct idr *idr, + spinlock_t *idr_spinlock, u32 swid) { unsigned long flags; + void *ptr; spin_lock_irqsave(idr_spinlock, flags); - idr_remove(idr, swid); + ptr = idr_remove(idr, swid); spin_unlock_irqrestore(idr_spinlock, flags); + return ptr; } static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, @@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, kfree(buf); } -struct mlx5_teardown_stream_context { - struct mlx5_fpga_tls_command_context cmd; - u32 swid; -}; - static void mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev, struct mlx5_fpga_tls_command_context *cmd, struct mlx5_fpga_dma_buf *resp) { - struct mlx5_teardown_stream_context *ctx = - container_of(cmd, struct mlx5_teardown_stream_context, cmd); - if (resp) { u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); @@ 
-186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, mlx5_fpga_err(fdev, "Teardown stream failed with syndrome = %d", syndrome); - else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx)) - mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, - &fdev->tls->tx_idr_spinlock, - ctx->swid); - else - mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr, - &fdev->tls->rx_idr_spinlock, - ctx->swid); } mlx5_fpga_tls_put_command_ctx(cmd); } @@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, rcu_read_lock(); flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); - rcu_read_unlock(); + if (unlikely(!flow)) { + rcu_read_unlock(); + WARN_ONCE(1, "Received NULL pointer for handle\n"); + kfree(buf); + return -EINVAL; + } mlx5_fpga_tls_flow_to_cmd(flow, cmd); + rcu_read_unlock(); MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); @@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, buf->complete = mlx_tls_kfree_complete; ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); + if (ret < 0) + kfree(buf); return ret; } @@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq, static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, void *flow, u32 swid, gfp_t flags) { - struct mlx5_teardown_stream_context *ctx; + struct mlx5_fpga_tls_command_context *ctx; struct mlx5_fpga_dma_buf *buf; void *cmd; @@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, if (!ctx) return; - buf = &ctx->cmd.buf; + buf = &ctx->buf; cmd = (ctx + 1); MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); MLX5_SET(tls_cmd, cmd, swid, swid); @@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, buf->sg[0].data = cmd; buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; - ctx->swid = swid; - mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, + mlx5_fpga_tls_cmd_send(mdev->fpga, ctx, mlx5_fpga_tls_teardown_completion); } @@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, struct mlx5_fpga_tls *tls = mdev->fpga->tls; void *flow; - rcu_read_lock(); if (direction_sx) - flow = idr_find(&tls->tx_idr, swid); + flow = mlx5_fpga_tls_release_swid(&tls->tx_idr, + &tls->tx_idr_spinlock, + swid); else - flow = idr_find(&tls->rx_idr, swid); - - rcu_read_unlock(); + flow = mlx5_fpga_tls_release_swid(&tls->rx_idr, + &tls->rx_idr_spinlock, + swid); if (!flow) { mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", @@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, return; } + synchronize_rcu(); /* before kfree(flow) */ mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 70cc906a102b..76716419370d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = { .size = 8, .limit = 4 }, - .mr_cache[16] = { - .size = 8, - .limit = 4 - }, - .mr_cache[17] = { - .size = 8, - .limit = 4 - }, - .mr_cache[18] = { - .size = 8, - .limit = 4 - }, - .mr_cache[19] = { - .size = 4, - .limit = 2 - }, - .mr_cache[20] = { - .size = 4, - .limit = 2 - }, }, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 
d23d53c0e284..f26a4ca29363 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) return 0; - emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); + emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0); if (!emad_wq) return -ENOMEM; mlxsw_core->emad_wq = emad_wq; @@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void) { int err; - mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); + mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0); if (!mlxsw_wq) return -ENOMEM; - mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM, + mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0, mlxsw_core_driver_name); if (!mlxsw_owq) { err = -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 9a79b5e11597..d633bef5f105 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = { {MLXSW_REG_SBXX_DIR_EGRESS, 1}, {MLXSW_REG_SBXX_DIR_EGRESS, 2}, {MLXSW_REG_SBXX_DIR_EGRESS, 3}, + {MLXSW_REG_SBXX_DIR_EGRESS, 15}, }; #define MLXSW_SP_SB_ING_TC_COUNT 8 @@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = { MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), }; static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, @@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = { MLXSW_SP_SB_CM(0, 7, 4), MLXSW_SP_SB_CM(0, 7, 4), MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), - MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), + MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8), MLXSW_SP_SB_CM(1, 0xff, 4), }; @@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = { MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(0, 0), MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(10000, 90000), }; static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 52fed8c7bf1e..902e766a8ed3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, /* A RIF is not created for macvlan netdevs. 
Their MAC is used to * populate the FDB */ - if (netif_is_macvlan(dev)) + if (netif_is_macvlan(dev) || netif_is_l3_master(dev)) return 0; for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index f6ce386c3036..50111f228d77 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid_index; int err = 0; - if (switchdev_trans_ph_prepare(trans)) + if (switchdev_trans_ph_commit(trans)) return 0; bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index a1d0d6e42533..d715ef4fc92f 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port, struct netdev_hw_addr *hw_addr) { struct ocelot *ocelot = port->ocelot; - struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL); + struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC); if (!ha) return -ENOMEM; @@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) ETH_GSTRING_LEN); } -static void ocelot_check_stats(struct work_struct *work) +static void ocelot_update_stats(struct ocelot *ocelot) { - struct delayed_work *del_work = to_delayed_work(work); - struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); int i, j; mutex_lock(&ocelot->stats_lock); @@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work) } } - cancel_delayed_work(&ocelot->stats_work); + mutex_unlock(&ocelot->stats_lock); +} + +static void ocelot_check_stats_work(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct ocelot *ocelot = container_of(del_work, struct ocelot, + stats_work); + + ocelot_update_stats(ocelot); + queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); - - mutex_unlock(&ocelot->stats_lock); } static void ocelot_get_ethtool_stats(struct net_device *dev, @@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev, int i; /* check and update now */ - ocelot_check_stats(&ocelot->stats_work.work); + ocelot_update_stats(ocelot); /* Copy all counters */ for (i = 0; i < ocelot->num_stats; i++) @@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot) ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), ANA_CPUQ_8021_CFG, i); - INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats); + INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work); queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); return 0; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 7cde387e5ec6..51cd57ab3d95 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, dma_object->addr))) { vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); + memblock = NULL; goto exit; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index eeda4ed98333..e336f6ee94f5 100644 --- 
a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, tmp_push_vlan_tci = FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | - FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | - NFP_FL_PUSH_VLAN_CFI; + FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid); push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 4fcaf11ed56e..0ed51e79db00 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -26,7 +26,7 @@ #define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) #define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) -#define NFP_FLOWER_MASK_VLAN_CFI BIT(12) +#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12) #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) #define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) @@ -82,7 +82,6 @@ #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) #define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) -#define NFP_FL_PUSH_VLAN_CFI BIT(12) #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) #define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index e03c8ef2c28c..9b8b843d0340 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, flow_rule_match_vlan(rule, &match); /* Populate the tci field. */ - if (match.key->vlan_id || match.key->vlan_priority) { - tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - match.key->vlan_priority) | - FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - match.key->vlan_id) | - NFP_FLOWER_MASK_VLAN_CFI; - ext->tci = cpu_to_be16(tmp_tci); - tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - match.mask->vlan_priority) | - FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - match.mask->vlan_id) | - NFP_FLOWER_MASK_VLAN_CFI; - msk->tci = cpu_to_be16(tmp_tci); - } + tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + match.key->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + match.key->vlan_id); + ext->tci = cpu_to_be16(tmp_tci); + + tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + match.mask->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + match.mask->vlan_id); + msk->tci = cpu_to_be16(tmp_tci); } } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d2c803bb4e56..94d228c04496 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) ret = dev_queue_xmit(skb); nfp_repr_inc_tx_stats(netdev, len, ret); - return ret; + return NETDEV_TX_OK; } static int nfp_repr_stop(struct net_device *netdev) @@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; - netdev->priv_flags |= IFF_NO_QUEUE; + netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; netdev->features |= NETIF_F_LLTX; if (nfp_app_has_tc(app)) { diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 
43a57ec296fd..127c89b22ef0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -431,12 +431,16 @@ struct qed_qm_info { u8 num_pf_rls; }; +#define QED_OVERFLOW_BIT 1 + struct qed_db_recovery_info { struct list_head list; /* Lock to protect the doorbell recovery mechanism list */ spinlock_t lock; + bool dorq_attn; u32 db_recovery_counter; + unsigned long overflow; }; struct storm_stats { @@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); /* doorbell recovery mechanism */ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); -void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, - enum qed_db_rec_exec db_exec); +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); /* Other Linux specific common definitions */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 9df8c4b3b54e..866cdc86a3f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn, /* Doorbell address sanity (address within doorbell bar range) */ static bool qed_db_rec_sanity(struct qed_dev *cdev, - void __iomem *db_addr, void *db_data) + void __iomem *db_addr, + enum qed_db_rec_width db_width, + void *db_data) { + u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64; + /* Make sure doorbell address is within the doorbell bar */ if (db_addr < cdev->doorbells || - (u8 __iomem *)db_addr > + (u8 __iomem *)db_addr + width > (u8 __iomem *)cdev->doorbells + cdev->db_size) { WARN(true, "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", @@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev, } /* Sanitize doorbell address */ - if (!qed_db_rec_sanity(cdev, db_addr, db_data)) + if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data)) return -EINVAL; /* Obtain hwfn from doorbell address */ @@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev, return 0; } - /* Sanitize doorbell address */ - if (!qed_db_rec_sanity(cdev, db_addr, db_data)) - return -EINVAL; - /* Obtain hwfn from doorbell address */ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); @@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) /* Ring the doorbell of a single doorbell recovery entry */ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, - struct qed_db_recovery_entry *db_entry, - enum qed_db_rec_exec db_exec) -{ - if (db_exec != DB_REC_ONCE) { - /* Print according to width */ - if (db_entry->db_width == DB_REC_WIDTH_32B) { - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "%s doorbell address %p data %x\n", - db_exec == DB_REC_DRY_RUN ? - "would have rung" : "ringing", - db_entry->db_addr, - *(u32 *)db_entry->db_data); - } else { - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "%s doorbell address %p data %llx\n", - db_exec == DB_REC_DRY_RUN ? 
- "would have rung" : "ringing", - db_entry->db_addr, - *(u64 *)(db_entry->db_data)); - } + struct qed_db_recovery_entry *db_entry) +{ + /* Print according to width */ + if (db_entry->db_width == DB_REC_WIDTH_32B) { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "ringing doorbell address %p data %x\n", + db_entry->db_addr, + *(u32 *)db_entry->db_data); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "ringing doorbell address %p data %llx\n", + db_entry->db_addr, + *(u64 *)(db_entry->db_data)); } /* Sanity */ if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, - db_entry->db_data)) + db_entry->db_width, db_entry->db_data)) return; /* Flush the write combined buffer. Since there are multiple doorbelling @@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, wmb(); /* Ring the doorbell */ - if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { - if (db_entry->db_width == DB_REC_WIDTH_32B) - DIRECT_REG_WR(db_entry->db_addr, - *(u32 *)(db_entry->db_data)); - else - DIRECT_REG_WR64(db_entry->db_addr, - *(u64 *)(db_entry->db_data)); - } + if (db_entry->db_width == DB_REC_WIDTH_32B) + DIRECT_REG_WR(db_entry->db_addr, + *(u32 *)(db_entry->db_data)); + else + DIRECT_REG_WR64(db_entry->db_addr, + *(u64 *)(db_entry->db_data)); /* Flush the write combined buffer. Next doorbell may come from a * different entity to the same address... @@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, } /* Traverse the doorbell recovery entry list and ring all the doorbells */ -void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, - enum qed_db_rec_exec db_exec) +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn) { struct qed_db_recovery_entry *db_entry = NULL; - if (db_exec != DB_REC_ONCE) { - DP_NOTICE(p_hwfn, - "Executing doorbell recovery. Counter was %d\n", - p_hwfn->db_recovery_info.db_recovery_counter); + DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); - /* Track amount of times recovery was executed */ - p_hwfn->db_recovery_info.db_recovery_counter++; - } + /* Track amount of times recovery was executed */ + p_hwfn->db_recovery_info.db_recovery_counter++; /* Protect the list */ spin_lock_bh(&p_hwfn->db_recovery_info.lock); list_for_each_entry(db_entry, - &p_hwfn->db_recovery_info.list, list_entry) { - qed_db_recovery_ring(p_hwfn, db_entry, db_exec); - if (db_exec == DB_REC_ONCE) - break; - } - + &p_hwfn->db_recovery_info.list, list_entry) + qed_db_recovery_ring(p_hwfn, db_entry); spin_unlock_bh(&p_hwfn->db_recovery_info.lock); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index e23980e301b6..8848d5bed6e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, u32 count = QED_DB_REC_COUNT; u32 usage = 1; + /* Flush any pending (e)dpms as they may never arrive */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); + /* wait for usage to zero or count to run out. This is necessary since * EDPM doorbell transactions can take multiple 64b cycles, and as such * can "split" over the pci. 
Possibly, the doorbell drop can happen with @@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 overflow; + u32 attn_ovfl, cur_ovfl; int rc; - overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); - DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow); - if (!overflow) { - qed_db_recovery_execute(p_hwfn, DB_REC_ONCE); + attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, + &p_hwfn->db_recovery_info.overflow); + cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + if (!cur_ovfl && !attn_ovfl) return 0; - } - if (qed_edpm_enabled(p_hwfn)) { + DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", + attn_ovfl, cur_ovfl); + + if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); if (rc) return rc; } - /* Flush any pending (e)dpm as they may never arrive */ - qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); - /* Release overflow sticky indication (stop silently dropping everything) */ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); /* Repeat all last doorbells (doorbell drop recovery) */ - qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); + qed_db_recovery_execute(p_hwfn); return 0; } -static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) +static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) { - u32 int_sts, first_drop_reason, details, address, all_drops_reason; struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + u32 overflow; int rc; - int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); - DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); + overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + if (!overflow) + goto out; + + /* Run PF doorbell recovery in next periodic handler */ + set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); + + if (!p_hwfn->db_bar_no_edpm) { + rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); + if (rc) + goto out; + } + + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); +out: + /* Schedule the handler even if overflow was not detected */ + qed_periodic_db_rec_start(p_hwfn); +} + +static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) +{ + u32 int_sts, first_drop_reason, details, address, all_drops_reason; + struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; /* int_sts may be zero since all PFs were interrupted for doorbell * overflow but another one already handled it. Can abort here. If * This PF also requires overflow recovery we will be interrupted again. * The masked almost full indication may also be set. Ignoring. */ + int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) return 0; + DP_NOTICE(p_hwfn->cdev, "DORQ attention. 
int_sts was %x\n", int_sts); + /* check if db_drop or overflow happened */ if (int_sts & (DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { @@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, first_drop_reason, all_drops_reason); - rc = qed_db_rec_handler(p_hwfn, p_ptt); - qed_periodic_db_rec_start(p_hwfn); - if (rc) - return rc; - /* Clear the doorbell drop details and prepare for next drop */ qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); @@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) return -EINVAL; } +static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) +{ + p_hwfn->db_recovery_info.dorq_attn = true; + qed_dorq_attn_overflow(p_hwfn); + + return qed_dorq_attn_int_sts(p_hwfn); +} + +static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->db_recovery_info.dorq_attn) + goto out; + + /* Call DORQ callback if the attention was missed */ + qed_dorq_attn_cb(p_hwfn); +out: + p_hwfn->db_recovery_info.dorq_attn = false; +} + /* Instead of major changes to the data-structure, we have a some 'special' * identifiers for sources that changed meaning between adapters. */ @@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, } } + /* Handle missed DORQ attention */ + qed_dorq_attn_handler(p_hwfn); + /* Clear IGU indication for the deasserted bits */ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_IGU_CMD + diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 1f356ed4f761..d473b522afc5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); /** * @brief - Doorbell Recovery handler. - * Run DB_REAL_DEAL doorbell recovery in case of PF overflow - * (and flush DORQ if needed), otherwise run DB_REC_ONCE. + * Run doorbell recovery in case of PF overflow (and flush DORQ if + * needed). 
* * @param p_hwfn * @param p_ptt diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index f164d4acebcb..6de23b56b294 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, } } -#define QED_PERIODIC_DB_REC_COUNT 100 +#define QED_PERIODIC_DB_REC_COUNT 10 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 #define QED_PERIODIC_DB_REC_INTERVAL \ msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9faaa6df78ed..2f318aaf2b05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; } else { DP_INFO(p_hwfn, - "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", + "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", vf->abs_vf_id, req->vfdev_info.eth_fp_hsi_major, req->vfdev_info.eth_fp_hsi_minor, diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 5f3f42a25361..bddb2b5982dc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); if (IS_ERR(ptp->clock)) { - rc = -EINVAL; DP_ERR(edev, "PTP clock registration failed\n"); + qede_ptp_disable(edev); + rc = -EINVAL; goto err2; } return 0; -err2: - qede_ptp_disable(edev); - ptp->clock = NULL; err1: kfree(ptp); +err2: edev->ptp = NULL; return rc; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 0c443ea98479..374a4d4371f9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -497,7 +497,7 @@ struct qlcnic_hardware_context { u16 board_type; u16 supported_type; - u16 link_speed; + u32 link_speed; u16 link_duplex; u16 link_autoneg; u16 module_type; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7562ccbbb39a..ed651dde6ef9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -28,6 +28,7 @@ #include <linux/pm_runtime.h> #include <linux/firmware.h> #include <linux/prefetch.h> +#include <linux/pci-aspm.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> @@ -5460,7 +5461,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) tp->cp_cmd |= PktCntrDisable | INTT_1; RTL_W16(tp, CPlusCmd, tp->cp_cmd); - RTL_W16(tp, IntrMitigate, 0x5151); + RTL_W16(tp, IntrMitigate, 0x5100); /* Work around for RxFIFO overflow. */ if (tp->mac_version == RTL_GIGA_MAC_VER_11) { @@ -7352,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; + /* Disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users. + */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 40d6356a7e73..3dfb07a78952 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -29,11 +29,13 @@ /* Specific functions used for Ring mode */ /* Enhanced descriptors */ -static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) +static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end, + int bfsize) { - p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - << ERDES1_BUFFER2_SIZE_SHIFT) - & ERDES1_BUFFER2_SIZE_MASK); + if (bfsize == BUF_SIZE_16KiB) + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB + << ERDES1_BUFFER2_SIZE_SHIFT) + & ERDES1_BUFFER2_SIZE_MASK); if (end) p->des1 |= cpu_to_le32(ERDES1_END_RING); @@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) } /* Normal descriptors */ -static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) +static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize) { - p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) - << RDES1_BUFFER2_SIZE_SHIFT) - & RDES1_BUFFER2_SIZE_MASK); + if (bfsize >= BUF_SIZE_2KiB) { + int bfsize2; + + bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT) + & RDES1_BUFFER2_SIZE_MASK); + } if (end) p->des1 |= cpu_to_le32(RDES1_END_RING); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 7fbb6a4dbf51..e061e9f5fad7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -296,7 +296,7 @@ exit: } static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { dwmac4_set_rx_owner(p, disable_rx_ic); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index 1d858fdec997..98fa471da7c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc, } static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { dwxgmac2_set_rx_owner(p, disable_rx_ic); } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 5ef91a790f9d..5202d6ad7919 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, if (unlikely(rdes0 & RDES0_OWN)) return dma_own; + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { + stats->rx_length_errors++; + return discard_frame; + } + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { x->rx_desc++; @@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, * It doesn't match with the information reported into the databook. * At any rate, we need to understand if the CSUM hw computation is ok * and report this info to the upper layers. 
*/ - ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), - !!(rdes0 & RDES0_FRAME_TYPE), - !!(rdes0 & ERDES0_RX_MAC_ADDR)); + if (likely(ret == good_frame)) + ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), + !!(rdes0 & RDES0_FRAME_TYPE), + !!(rdes0 & ERDES0_RX_MAC_ADDR)); if (unlikely(rdes0 & RDES0_DRIBBLING)) x->dribbling_bit++; @@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, } static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, - int mode, int end) + int mode, int end, int bfsize) { + int bfsize1; + p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); + + bfsize1 = min(bfsize, BUF_SIZE_8KiB); + p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) ehn_desc_rx_set_on_chain(p); else - ehn_desc_rx_set_on_ring(p, end); + ehn_desc_rx_set_on_ring(p, end, bfsize); if (disable_rx_ic) p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 92b8944f26e3..5bb00234d961 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -33,7 +33,7 @@ struct dma_extended_desc; struct stmmac_desc_ops { /* DMA RX descriptor ring initialization */ void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, - int end); + int end, int bfsize); /* DMA TX descriptor ring initialization */ void (*init_tx_desc)(struct dma_desc *p, int mode, int end); /* Invoked by the xmit function to prepare the tx descriptor */ diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index de65bb29feba..b7dd4e3c760d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, return dma_own; if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { - pr_warn("%s: Oversized frame spanned multiple buffers\n", - __func__); stats->rx_length_errors++; return discard_frame; } @@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, } static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, - int end) + int end, int bfsize) { + int bfsize1; + p->des0 |= cpu_to_le32(RDES0_OWN); - p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); + + bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1); + p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) ndesc_rx_set_on_chain(p, end); else - ndesc_rx_set_on_ring(p, end); + ndesc_rx_set_on_ring(p, end, bfsize); if (disable_rx_ic) p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6a2e1031a62a..a26e36dbb5df 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) if (priv->extend_desc) stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + (i == DMA_RX_SIZE - 1), + priv->dma_buf_sz); else stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + (i == DMA_RX_SIZE - 1), + priv->dma_buf_sz); } /** @@ -3352,9 +3354,8 @@ static int 
stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; struct stmmac_channel *ch = &priv->channel[queue]; - unsigned int entry = rx_q->cur_rx; + unsigned int next_entry = rx_q->cur_rx; int coe = priv->hw->rx_csum; - unsigned int next_entry; unsigned int count = 0; bool xmac; @@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); } while (count < limit) { - int status; + int entry, status; struct dma_desc *p; struct dma_desc *np; + entry = next_entry; + if (priv->extend_desc) p = (struct dma_desc *)(rx_q->dma_erx + entry); else @@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) * ignored */ if (frame_len > priv->dma_buf_sz) { - netdev_err(priv->dev, - "len %d larger than size (%d)\n", - frame_len, priv->dma_buf_sz); + if (net_ratelimit()) + netdev_err(priv->dev, + "len %d larger than size (%d)\n", + frame_len, priv->dma_buf_sz); priv->dev->stats.rx_length_errors++; - break; + continue; } /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 @@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) dev_warn(priv->device, "packet dropped\n"); priv->dev->stats.rx_dropped++; - break; + continue; } dma_sync_single_for_cpu(priv->device, @@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) } else { skb = rx_q->rx_skbuff[entry]; if (unlikely(!skb)) { - netdev_err(priv->dev, - "%s: Inconsistent Rx chain\n", - priv->dev->name); + if (net_ratelimit()) + netdev_err(priv->dev, + "%s: Inconsistent Rx chain\n", + priv->dev->name); priv->dev->stats.rx_dropped++; - break; + continue; } prefetch(skb->data - NET_IP_ALIGN); rx_q->rx_skbuff[entry] = NULL; @@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; } - entry = next_entry; } stmmac_rx_refill(priv, queue); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index e859ae2e42d5..49f41b64077b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -987,6 +987,7 @@ struct netvsc_device { wait_queue_head_t wait_drain; bool destroy; + bool tx_disable; /* if true, do not wake up queue again */ /* Receive buffer allocated by us but manages by NetVSP */ void *recv_buf; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 813d195bbd57..e0dce373cdd9 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void) init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; + net_device->tx_disable = false; net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; @@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, } else { struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); - if (netif_tx_queue_stopped(txq) && + if (netif_tx_queue_stopped(txq) && !net_device->tx_disable && (hv_get_avail_to_write_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { netif_tx_wake_queue(txq); @@ -874,7 +875,8 @@ static inline int netvsc_send_pkt( } else if (ret == -EAGAIN) { netif_tx_stop_queue(txq); ndev_ctx->eth_stats.stop_queue++; - if (atomic_read(&nvchan->queue_sends) < 1) { + if (atomic_read(&nvchan->queue_sends) < 1 && + 
!net_device->tx_disable) { netif_tx_wake_queue(txq); ndev_ctx->eth_stats.wake_queue++; ret = -ENOSPC; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index cf4897043e83..b20fb0fb595b 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net) rcu_read_unlock(); } +static void netvsc_tx_enable(struct netvsc_device *nvscdev, + struct net_device *ndev) +{ + nvscdev->tx_disable = false; + virt_wmb(); /* ensure queue wake up mechanism is on */ + + netif_tx_wake_all_queues(ndev); +} + static int netvsc_open(struct net_device *net) { struct net_device_context *ndev_ctx = netdev_priv(net); @@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net) rdev = nvdev->extension; if (!rdev->link_state) { netif_carrier_on(net); - netif_tx_wake_all_queues(net); + netvsc_tx_enable(nvdev, net); } if (vf_netdev) { @@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev) } } +static void netvsc_tx_disable(struct netvsc_device *nvscdev, + struct net_device *ndev) +{ + if (nvscdev) { + nvscdev->tx_disable = true; + virt_wmb(); /* ensure txq will not wake up after stop */ + } + + netif_tx_disable(ndev); +} + static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); @@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net) struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); int ret; - netif_tx_disable(net); + netvsc_tx_disable(nvdev, net); /* No need to close rndis filter if it is removed already */ if (!nvdev) @@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev, /* If device was up (receiving) then shutdown */ if (netif_running(ndev)) { - netif_tx_disable(ndev); + netvsc_tx_disable(nvdev, ndev); ret = rndis_filter_close(nvdev); if (ret) { @@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w) if (rdev->link_state) { rdev->link_state = false; netif_carrier_on(net); - netif_tx_wake_all_queues(net); + netvsc_tx_enable(net_device, net); } else { notify = true; } @@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w) if (!rdev->link_state) { rdev->link_state = true; netif_carrier_off(net); - netif_tx_stop_all_queues(net); + netvsc_tx_disable(net_device, net); } kfree(event); break; @@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w) if (!rdev->link_state) { rdev->link_state = true; netif_carrier_off(net); - netif_tx_stop_all_queues(net); + netvsc_tx_disable(net_device, net); event->event = RNDIS_STATUS_MEDIA_CONNECT; spin_lock_irqsave(&ndev_ctx->lock, flags); list_add(&event->list, &ndev_ctx->reconfig_events); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6ed96fdfd96d..9ce61b019aad 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1246,6 +1246,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev, goto err_option_port_add; } + /* set promiscuity level to new slave */ + if (dev->flags & IFF_PROMISC) { + err = dev_set_promiscuity(port_dev, 1); + if (err) + goto err_set_slave_promisc; + } + + /* set allmulti level to new slave */ + if (dev->flags & IFF_ALLMULTI) { + err = dev_set_allmulti(port_dev, 1); + if (err) { + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(port_dev, -1); + goto err_set_slave_promisc; + } + } + netif_addr_lock_bh(dev); dev_uc_sync_multiple(port_dev, dev); dev_mc_sync_multiple(port_dev, dev); @@ -1262,6 
+1279,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev, return 0; +err_set_slave_promisc: + __team_option_inst_del_port(team, port); + err_option_port_add: team_upper_dev_unlink(team, port); @@ -1307,6 +1327,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev) team_port_disable(team, port); list_del_rcu(&port->list); + + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(port_dev, -1); + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(port_dev, -1); + team_upper_dev_unlink(team, port); netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 74bebbdb4b15..9195f3476b1d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 7c1430ed0244..cd15c32b2e43 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1273,9 +1273,14 @@ static void vrf_setup(struct net_device *dev) /* default to no qdisc; user can add if desired */ dev->priv_flags |= IFF_NO_QUEUE; + dev->priv_flags |= IFF_NO_RX_HANDLER; - dev->min_mtu = 0; - dev->max_mtu = 0; + /* VRF devices do not care about MTU, but if the MTU is set + * too low then the ipv4 and ipv6 protocols are disabled + * which breaks networking. 
+ */ + dev->min_mtu = IPV6_MIN_MTU; + dev->max_mtu = ETH_MAX_MTU; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index a20ea270d519..1acc622d2183 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) num_msdus++; num_bytes += ret; } - ieee80211_return_txq(hw, txq); + ieee80211_return_txq(hw, txq, false); ieee80211_txq_schedule_end(hw, txq->ac); record->num_msdus = cpu_to_le16(num_msdus); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b73c23d4ce86..41e89db244d2 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac) if (ret < 0) break; } - ieee80211_return_txq(hw, txq); + ieee80211_return_txq(hw, txq, false); ath10k_htt_tx_txq_update(hw, txq); if (ret == -EBUSY) break; @@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, if (ret < 0) break; } - ieee80211_return_txq(hw, txq); + ieee80211_return_txq(hw, txq, false); ath10k_htt_tx_txq_update(hw, txq); out: ieee80211_txq_schedule_end(hw, ac); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 773d428ff1b0..b17e1ca40995 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) goto out; while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) { + bool force; + tid = (struct ath_atx_tid *)queue->drv_priv; ret = ath_tx_sched_aggr(sc, txq, tid); ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret); - ieee80211_return_txq(hw, queue); + force = !skb_queue_empty(&tid->retry_q); + ieee80211_return_txq(hw, queue, force); } out: diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index fdc56f821b5a..eb6defb6d0cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -82,6 +82,7 @@ #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" +#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-" #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" #define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" @@ -105,8 +106,8 @@ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" -#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ - IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" +#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ @@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; -const struct iwl_cfg iwl22260_2ax_cfg = { - .name = "Intel(R) Wireless-AX 22260", +const struct iwl_cfg iwl_ax101_cfg_quz_hr = { + .name = "Intel(R) Wi-Fi 6 AX101", + .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device 
doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg iwl_ax200_cfg_cc = { + .name = "Intel(R) Wi-Fi 6 AX200 160MHz", .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* @@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = { }; const struct iwl_cfg killer1650x_2ax_cfg = { - .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)", + .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)", .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* @@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = { }; const struct iwl_cfg killer1650w_2ax_cfg = { - .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)", + .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)", .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* @@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = { }; const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { - .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)", + .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { }; const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { - .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)", + .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index f119c49cd39c..d7380016f1c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, if (!range) { IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); + memset(*data, 0, le32_to_cpu((*data)->len)); return; } @@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, if (range_size < 0) { IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n", le32_to_cpu(reg->region_id), type); + memset(*data, 0, le32_to_cpu((*data)->len)); return; } range = range + range_size; @@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, trigger = fwrt->dump.active_trigs[id].trig; - size = sizeof(*dump_file); - size += iwl_fw_ini_get_trigger_len(fwrt, trigger); - + size = iwl_fw_ini_get_trigger_len(fwrt, trigger); if (!size) return NULL; + size += sizeof(*dump_file); + dump_file = vzalloc(size); if (!dump_file) return NULL; @@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct 
iwl_fw_runtime *fwrt, iwl_dump_error_desc->len = 0; ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); - if (ret) { + if (ret) kfree(iwl_dump_error_desc); - } else { - set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); - - /* trigger nmi to halt the fw */ - iwl_force_nmi(fwrt->trans); - } + else + iwl_trans_sync_nmi(fwrt->trans); return ret; } @@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point); void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt) { - /* if the wait event timeout elapses instead of wake up then - * the driver did not receive NMI interrupt and can not assume the FW - * is halted - */ - int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq, - !test_bit(STATUS_FW_WAIT_DUMP, - &fwrt->trans->status), - msecs_to_jiffies(2000)); - if (!ret) { - /* failed to receive NMI interrupt, assuming the FW is stuck */ - set_bit(STATUS_FW_ERROR, &fwrt->trans->status); - - clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); - } - - /* Assuming the op mode mutex is held at this point */ iwl_fw_dbg_collect_sync(fwrt); iwl_trans_stop_device(fwrt->trans); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 7adf4e4e841a..12310e3d2fc5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, fwrt->ops_ctx = ops_ctx; INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); - init_waitqueue_head(&fwrt->trans->fw_halt_waitq); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f5f87773667b..93070848280a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; +extern const struct iwl_cfg iwl_ax101_cfg_quz_hr; extern const struct iwl_cfg iwl22000_2ax_cfg_hr; -extern const struct iwl_cfg iwl22260_2ax_cfg; +extern const struct iwl_cfg iwl_ax200_cfg_cc; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index aea6d03e545a..e539bc94eff7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -327,6 +327,7 @@ enum { #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) +#define CSR_HW_REV_TYPE_QUZ (0x0000354) #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) #define CSR_HW_REV_TYPE_SO (0x0000370) #define CSR_HW_REV_TYPE_TY (0x0000420) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index bbebbf3efd57..d8690acee40c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -338,7 +338,6 @@ enum iwl_d3_status { * are sent * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation - * @STATUS_FW_WAIT_DUMP: if 
set, wait until cleared before collecting dump */ enum iwl_trans_status { STATUS_SYNC_HCMD_ACTIVE, @@ -351,7 +350,6 @@ enum iwl_trans_status { STATUS_TRANS_GOING_IDLE, STATUS_TRANS_IDLE, STATUS_TRANS_DEAD, - STATUS_FW_WAIT_DUMP, }; static inline int @@ -618,6 +616,7 @@ struct iwl_trans_ops { struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, u32 dump_mask); void (*debugfs_cleanup)(struct iwl_trans *trans); + void (*sync_nmi)(struct iwl_trans *trans); }; /** @@ -831,7 +830,6 @@ struct iwl_trans { u32 lmac_error_event_table[2]; u32 umac_error_event_table; unsigned int error_event_table_tlv_status; - wait_queue_head_t fw_halt_waitq; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ @@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans) /* prevent double restarts due to the same erroneous FW */ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) iwl_op_mode_nic_error(trans->op_mode); +} - if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status)) - wake_up(&trans->fw_halt_waitq); - +static inline void iwl_trans_sync_nmi(struct iwl_trans *trans) +{ + if (trans->ops->sync_nmi) + trans->ops->sync_nmi(trans); } /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 3a92c09d4692..6a3b11dd2edf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_remove(mvm, vif); - kfree(mvmvif->ap_wep_key); - mvmvif->ap_wep_key = NULL; - mutex_unlock(&mvm->mutex); } @@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { - /* if wep is used, need to set the key for the station now */ - if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { - mvm_sta->wep_key = - kmemdup(mvmvif->ap_wep_key, - sizeof(*mvmvif->ap_wep_key) + - mvmvif->ap_wep_key->keylen, - GFP_KERNEL); - if (!mvm_sta->wep_key) { - ret = -ENOMEM; - goto out_unlock; - } - - ret = iwl_mvm_set_sta_key(mvm, vif, sta, - mvm_sta->wep_key, - STA_KEY_IDX_INVALID); - } else { - ret = 0; - } + ret = 0; /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) @@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, NL80211_TDLS_DISABLE_LINK); } - /* Remove STA key if this is an AP using WEP */ - if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { - int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta, - mvm_sta->wep_key); - - if (!ret) - ret = rm_ret; - kfree(mvm_sta->wep_key); - mvm_sta->wep_key = NULL; - } - if (unlikely(ret && test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status))) @@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (changed & (IEEE80211_RC_BW_CHANGED | + IEEE80211_RC_SUPP_RATES_CHANGED | + IEEE80211_RC_NSS_CHANGED)) + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + true); if (vif->type == NL80211_IFTYPE_STATION && changed & IEEE80211_RC_NSS_CHANGED) @@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; case 
WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - if (vif->type == NL80211_IFTYPE_AP) { - struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(vif); - - mvmvif->ap_wep_key = kmemdup(key, - sizeof(*key) + key->keylen, - GFP_KERNEL); - if (!mvmvif->ap_wep_key) - return -ENOMEM; - } - - if (vif->type != NL80211_IFTYPE_STATION) - return 0; - break; + if (vif->type == NL80211_IFTYPE_STATION) + break; + if (iwl_mvm_has_new_tx_api(mvm)) + return -EOPNOTSUPP; + /* support HW crypto on TX */ + return 0; default: /* currently FW supports only one optional cipher scheme */ if (hw->n_cipher_schemes && @@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { IWL_WARN(mvm, "set key failed\n"); + key->hw_key_idx = STA_KEY_IDX_INVALID; /* * can't add key for RX, but we don't need it - * in the device for TX so still return 0 + * in the device for TX so still return 0, + * unless we have new TX API where we cannot + * put key material into the TX_CMD */ - key->hw_key_idx = STA_KEY_IDX_INVALID; - ret = 0; + if (iwl_mvm_has_new_tx_api(mvm)) + ret = -EOPNOTSUPP; + else + ret = 0; } break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index bca6f6b536d9..a50dc53df086 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -498,7 +498,6 @@ struct iwl_mvm_vif { netdev_features_t features; struct iwl_probe_resp_data __rcu *probe_resp_data; - struct ieee80211_key_conf *ap_wep_key; }; static inline struct iwl_mvm_vif * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 498c315291cf..98d123dd7177 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); list_del_init(&mvmtxq->list); + local_bh_disable(); iwl_mvm_mac_itxq_xmit(mvm->hw, txq); + local_bh_enable(); } mutex_unlock(&mvm->mutex); @@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout); - if (mvmvif->ap_wep_key) { - u8 key_offset = iwl_mvm_set_fw_key_idx(mvm); - - __set_bit(key_offset, mvm->fw_key_table); - - if (key_offset == STA_KEY_IDX_INVALID) - return -ENOSPC; - - ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, - mvmvif->ap_wep_key, true, 0, NULL, 0, - key_offset, 0); - if (ret) - return ret; - } - return 0; } @@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); - if (mvmvif->ap_wep_key) { - int i; - - if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx, - mvm->fw_key_table)) { - IWL_ERR(mvm, "offset %d not used in fw key table.\n", - mvmvif->ap_wep_key->hw_key_idx); - return -ENOENT; - } - - /* track which key was deleted last */ - for (i = 0; i < STA_KEY_MAX_NUM; i++) { - if (mvm->fw_key_deleted[i] < U8_MAX) - mvm->fw_key_deleted[i]++; - } - mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0; - ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id, - mvmvif->ap_wep_key, true); - if (ret) - return ret; - } - ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 79700c7310a1..b4d4071b865d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data { * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data - * @wep_key: used in AP mode. Is a duplicate of the WEP key. * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. 
for the 4 way handshake @@ -426,8 +425,6 @@ struct iwl_mvm_sta { struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; - struct ieee80211_key_conf *wep_key; - u8 reserved_queue; /* Temporary, until the new TLC will control the Tx protection */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 2b94e4cef56c..9f1af8da9dc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)}, - {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)}, {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)}, - {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)}, + {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)}, {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index bf8b61a476c5..59213164f35e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); -void iwl_trans_sync_nmi(struct iwl_trans *trans); +void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index fe8269d023de..79c1dc05f948 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans) .unref = iwl_trans_pcie_unref, \ .dump_data = iwl_trans_pcie_dump_data, \ .d3_suspend = iwl_trans_pcie_d3_suspend, \ - .d3_resume = iwl_trans_pcie_d3_resume + .d3_resume = iwl_trans_pcie_d3_resume, \ + .sync_nmi = iwl_trans_pcie_sync_nmi #ifdef CONFIG_PM_SLEEP #define IWL_TRANS_PM_OPS \ @@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } else if (cfg == &iwl_ax101_cfg_qu_hr) { if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && + trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { + trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { trans->cfg = &iwl_ax101_cfg_qu_hr; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == @@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } 
else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && - (trans->cfg != &iwl22260_2ax_cfg || + (trans->cfg != &iwl_ax200_cfg_cc || trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { u32 hw_status; @@ -3637,7 +3642,7 @@ out_no_pci: return ERR_PTR(ret); } -void iwl_trans_sync_nmi(struct iwl_trans *trans) +void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) { unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 88530d9f4a54..38d110338987 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, cmd_str); ret = -ETIMEDOUT; - iwl_trans_sync_nmi(trans); + iwl_trans_pcie_sync_nmi(trans); goto cancel; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 9fbd37d23e85..7be73e2c4681 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, iwl_get_cmd_string(trans, cmd->id)); ret = -ETIMEDOUT; - iwl_trans_sync_nmi(trans); + iwl_trans_pcie_sync_nmi(trans); goto cancel; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0838af04d681..524eb5805995 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, enum nl80211_band band; const struct ieee80211_ops *ops = &mac80211_hwsim_ops; struct net *net; - int idx; + int idx, i; int n_limits = 0; if (WARN_ON(param->channels > 1 && !param->use_chanctx)) @@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, goto failed_hw; } + data->if_combination.max_interfaces = 0; + for (i = 0; i < n_limits; i++) + data->if_combination.max_interfaces += + data->if_limits[i].max; + data->if_combination.n_limits = n_limits; - data->if_combination.max_interfaces = 2048; data->if_combination.limits = data->if_limits; - hw->wiphy->iface_combinations = &data->if_combination; - hw->wiphy->n_iface_combinations = 1; + /* + * If we actually were asked to support combinations, + * advertise them - if there's only a single thing like + * only IBSS then don't advertise it as combinations. 
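As a side note on the mac80211_hwsim hunk just above: it replaces the hard-coded max_interfaces = 2048 with the sum of the per-type limits and only advertises an interface combination when more than one interface can actually coexist. A minimal standalone sketch of that accounting, using a made-up if_limit struct and limit values rather than the driver's real types:

#include <stdio.h>

struct if_limit { int max; };   /* illustrative stand-in, not the mac80211 type */

int main(void)
{
	struct if_limit limits[] = { { .max = 1 }, { .max = 2 } };  /* e.g. one AP, two STAs */
	int n_limits = sizeof(limits) / sizeof(limits[0]);
	int max_interfaces = 0;

	/* Sum the per-type maxima instead of assuming a huge fixed value. */
	for (int i = 0; i < n_limits; i++)
		max_interfaces += limits[i].max;

	if (max_interfaces > 1)
		printf("advertise combination: up to %d interfaces\n", max_interfaces);
	else
		printf("single interface only, no combination advertised\n");
	return 0;
}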
+ */ + if (data->if_combination.max_interfaces > 1) { + hw->wiphy->iface_combinations = &data->if_combination; + hw->wiphy->n_iface_combinations = 1; + } if (param->ciphers) { memcpy(data->ciphers, param->ciphers, diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index d54dda67d036..3af45949e868 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev) bus_ops->rmw = mt7603_rmw; dev->mt76.bus = bus_ops; + spin_lock_init(&dev->ps_lock); + INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work); tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, (unsigned long)dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 5e31d7da96fc..5abc02b57818 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid) MT_BA_CONTROL_1_RESET)); } -void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, +void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ba_size) { u32 addr = mt7603_wtbl2_addr(wcid); @@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, mt76_clear(dev, addr + (15 * 4), tid_mask); return; } - mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); - - mt7603_mac_stop(dev); - switch (tid) { - case 0: - mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn); - break; - case 1: - mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn); - break; - case 2: - mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO, - ssn); - mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI, - ssn >> 8); - break; - case 3: - mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn); - break; - case 4: - mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn); - break; - case 5: - mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO, - ssn); - mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI, - ssn >> 4); - break; - case 6: - mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn); - break; - case 7: - mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn); - break; - } - mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2); - mt7603_mac_start(dev); for (i = 7; i > 0; i--) { if (ba_size >= MT_AGG_SIZE_LIMIT(i)) @@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *rate = &info->control.rates[0]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; struct ieee80211_vif *vif = info->control.vif; struct mt7603_vif *mvif; int wlan_idx; @@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, int tx_count = 8; u8 frame_type, frame_subtype; u16 fc = le16_to_cpu(hdr->frame_control); + u16 seqno = 0; u8 vif_idx = 0; u32 val; u8 bw; @@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, tx_count = 0x1f; val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) | - FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl)); + MT_TXD3_SN_VALID; + + if (ieee80211_is_data_qos(hdr->frame_control)) + seqno = le16_to_cpu(hdr->seq_ctrl); + else if 
(ieee80211_is_back_req(hdr->frame_control)) + seqno = le16_to_cpu(bar->start_seq_num); + else + val &= ~MT_TXD3_SN_VALID; + + val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4); + txwi[3] = cpu_to_le32(val); if (key) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index cc0fe0933b2d..a3c4ef198bfe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -372,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; struct sk_buff_head list; - mt76_stop_tx_queues(&dev->mt76, sta, false); + mt76_stop_tx_queues(&dev->mt76, sta, true); mt7603_wtbl_set_ps(dev, msta, ps); if (ps) return; @@ -584,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_OPERATIONAL: mtxq->aggr = true; mtxq->send_bar = false; - mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size); + mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); - mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); + mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1); break; case IEEE80211_AMPDU_TX_START: mtxq->agg_ssn = *ssn << 4; @@ -598,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); + mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index 79f332429432..6049f3b7c8fe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval); int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb); void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data); void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid); -void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, +void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ba_size); void mt7603_pse_client_reset(struct mt7603_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 9ed231abe916..4fe5a83ca5a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -466,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, return; rcu_read_lock(); - mt76_tx_status_lock(mdev, &list); if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); @@ -479,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, drv_priv); } + mt76_tx_status_lock(mdev, &list); + if (wcid) { if (stat->pktid >= MT_PACKET_ID_FIRST) status.skb = mt76_tx_status_skb_get(mdev, wcid, @@ -498,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, if (*update == 0 && stat_val == stat_cache && stat->wcid == msta->status.wcid && msta->n_frames < 32) { msta->n_frames++; - goto out; + mt76_tx_status_unlock(mdev, &list); + rcu_read_unlock(); + 
return; } mt76x02_mac_fill_tx_status(dev, status.info, &msta->status, @@ -514,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, if (status.skb) mt76_tx_status_skb_done(mdev, status.skb, &list); - else - ieee80211_tx_status_ext(mt76_hw(dev), &status); - -out: mt76_tx_status_unlock(mdev, &list); + + if (!status.skb) + ieee80211_tx_status_ext(mt76_hw(dev), &status); rcu_read_unlock(); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 4b1744e9fb78..50b92ca92bd7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -673,7 +673,6 @@ enum rt2x00_state_flags { CONFIG_CHANNEL_HT40, CONFIG_POWERSAVING, CONFIG_HT_DISABLED, - CONFIG_QOS_DISABLED, CONFIG_MONITORING, /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 2825560e2424..e8462f25d252 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -642,19 +642,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, rt2x00dev->intf_associated--; rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); - - clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); } /* - * Check for access point which do not support 802.11e . We have to - * generate data frames sequence number in S/W for such AP, because - * of H/W bug. - */ - if (changes & BSS_CHANGED_QOS && !bss_conf->qos) - set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags); - - /* * When the erp information has changed, we should perform * additional configuration steps. For all other changes we are done. */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 92ddc19e7bf7..4834b4eb0206 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { /* * rt2800 has a H/W (or F/W) bug, device incorrectly increase - * seqno on retransmited data (non-QOS) frames. To workaround - * the problem let's generate seqno in software if QOS is - * disabled. + * seqno on retransmitted data (non-QOS) and management frames. + * To workaround the problem let's generate seqno in software. + * Except for beacons which are transmitted periodically by H/W + * hence hardware has to assign seqno for them. 
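Reading the rt2x00 workaround described above another way: beacons keep the hardware-assigned sequence number, every other frame gets a driver-maintained counter. The sketch below models that choice with a plain 12-bit software counter; the function name and the counter are illustrative, not rt2x00 symbols.

#include <stdbool.h>
#include <stdio.h>

static unsigned int sw_seqno;   /* software sequence counter */

static unsigned int assign_seqno(bool is_beacon)
{
	if (is_beacon)
		return 0;                        /* leave numbering to the hardware */
	sw_seqno = (sw_seqno + 1) & 0x0fff;      /* 802.11 seqno wraps at 4096 */
	return sw_seqno;
}

int main(void)
{
	printf("data frame   -> SW seqno %u\n", assign_seqno(false));
	printf("mgmt frame   -> SW seqno %u\n", assign_seqno(false));
	printf("beacon frame -> HW numbered (placeholder %u)\n", assign_seqno(true));
	return 0;
}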
*/ - if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags)) - __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); - else + if (ieee80211_is_beacon(hdr->frame_control)) { + __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); /* H/W will generate sequence number */ return; + } + + __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); } /* diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index b72a303176c7..9486acc08402 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -198,14 +198,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, return NULL; nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL); - if (nd_btt->id < 0) { - kfree(nd_btt); - return NULL; - } + if (nd_btt->id < 0) + goto out_nd_btt; nd_btt->lbasize = lbasize; - if (uuid) + if (uuid) { uuid = kmemdup(uuid, 16, GFP_KERNEL); + if (!uuid) + goto out_put_id; + } nd_btt->uuid = uuid; dev = &nd_btt->dev; dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id); @@ -220,6 +221,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, return NULL; } return dev; + +out_put_id: + ida_simple_remove(&nd_region->btt_ida, nd_btt->id); + +out_nd_btt: + kfree(nd_btt); + return NULL; } struct device *nd_btt_create(struct nd_region *nd_region) diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 7849bf1812c4..f293556cbbf6 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -2249,9 +2249,12 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, if (!nsblk->uuid) goto blk_err; memcpy(name, nd_label->name, NSLABEL_NAME_LEN); - if (name[0]) + if (name[0]) { nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL); + if (!nsblk->alt_name) + goto blk_err; + } res = nsblk_add_resource(nd_region, ndd, nsblk, __le64_to_cpu(nd_label->dpa)); if (!res) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index bc2f700feef8..0279eb1da3ef 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -113,13 +113,13 @@ static void write_pmem(void *pmem_addr, struct page *page, while (len) { mem = kmap_atomic(page); - chunk = min_t(unsigned int, len, PAGE_SIZE); + chunk = min_t(unsigned int, len, PAGE_SIZE - off); memcpy_flushcache(pmem_addr, mem + off, chunk); kunmap_atomic(mem); len -= chunk; off = 0; page++; - pmem_addr += PAGE_SIZE; + pmem_addr += chunk; } } @@ -132,7 +132,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, while (len) { mem = kmap_atomic(page); - chunk = min_t(unsigned int, len, PAGE_SIZE); + chunk = min_t(unsigned int, len, PAGE_SIZE - off); rem = memcpy_mcsafe(mem + off, pmem_addr, chunk); kunmap_atomic(mem); if (rem) @@ -140,7 +140,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, len -= chunk; off = 0; page++; - pmem_addr += PAGE_SIZE; + pmem_addr += chunk; } return BLK_STS_OK; } diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index f8bb746a549f..a570f2263a42 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -22,6 +22,8 @@ static bool key_revalidate = true; module_param(key_revalidate, bool, 0444); MODULE_PARM_DESC(key_revalidate, "Require key validation at init."); +static const char zero_key[NVDIMM_PASSPHRASE_LEN]; + static void *key_data(struct key *key) { struct encrypted_key_payload *epayload = dereference_key_locked(key); @@ -75,6 +77,16 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm) return key; } +static const void 
*nvdimm_get_key_payload(struct nvdimm *nvdimm, + struct key **key) +{ + *key = nvdimm_request_key(nvdimm); + if (!*key) + return zero_key; + + return key_data(*key); +} + static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm, key_serial_t id, int subclass) { @@ -105,36 +117,57 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm, return key; } -static struct key *nvdimm_key_revalidate(struct nvdimm *nvdimm) +static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm, + key_serial_t id, int subclass, struct key **key) +{ + *key = NULL; + if (id == 0) { + if (subclass == NVDIMM_BASE_KEY) + return zero_key; + else + return NULL; + } + + *key = nvdimm_lookup_user_key(nvdimm, id, subclass); + if (!*key) + return NULL; + + return key_data(*key); +} + + +static int nvdimm_key_revalidate(struct nvdimm *nvdimm) { struct key *key; int rc; + const void *data; if (!nvdimm->sec.ops->change_key) - return NULL; + return -EOPNOTSUPP; - key = nvdimm_request_key(nvdimm); - if (!key) - return NULL; + data = nvdimm_get_key_payload(nvdimm, &key); /* * Send the same key to the hardware as new and old key to * verify that the key is good. */ - rc = nvdimm->sec.ops->change_key(nvdimm, key_data(key), - key_data(key), NVDIMM_USER); + rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER); if (rc < 0) { nvdimm_put_key(key); - key = NULL; + return rc; } - return key; + + nvdimm_put_key(key); + nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER); + return 0; } static int __nvdimm_security_unlock(struct nvdimm *nvdimm) { struct device *dev = &nvdimm->dev; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - struct key *key = NULL; + struct key *key; + const void *data; int rc; /* The bus lock should be held at the top level of the call stack */ @@ -160,16 +193,11 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm) if (!key_revalidate) return 0; - key = nvdimm_key_revalidate(nvdimm); - if (!key) - return nvdimm_security_freeze(nvdimm); + return nvdimm_key_revalidate(nvdimm); } else - key = nvdimm_request_key(nvdimm); + data = nvdimm_get_key_payload(nvdimm, &key); - if (!key) - return -ENOKEY; - - rc = nvdimm->sec.ops->unlock(nvdimm, key_data(key)); + rc = nvdimm->sec.ops->unlock(nvdimm, data); dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key), rc == 0 ? "success" : "fail"); @@ -195,6 +223,7 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid) struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); struct key *key; int rc; + const void *data; /* The bus lock should be held at the top level of the call stack */ lockdep_assert_held(&nvdimm_bus->reconfig_mutex); @@ -214,11 +243,12 @@ int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid) return -EBUSY; } - key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); - if (!key) + data = nvdimm_get_user_key_payload(nvdimm, keyid, + NVDIMM_BASE_KEY, &key); + if (!data) return -ENOKEY; - rc = nvdimm->sec.ops->disable(nvdimm, key_data(key)); + rc = nvdimm->sec.ops->disable(nvdimm, data); dev_dbg(dev, "key: %d disable: %s\n", key_serial(key), rc == 0 ? 
"success" : "fail"); @@ -235,6 +265,7 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid, struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); struct key *key, *newkey; int rc; + const void *data, *newdata; /* The bus lock should be held at the top level of the call stack */ lockdep_assert_held(&nvdimm_bus->reconfig_mutex); @@ -249,22 +280,19 @@ int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid, return -EIO; } - if (keyid == 0) - key = NULL; - else { - key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); - if (!key) - return -ENOKEY; - } + data = nvdimm_get_user_key_payload(nvdimm, keyid, + NVDIMM_BASE_KEY, &key); + if (!data) + return -ENOKEY; - newkey = nvdimm_lookup_user_key(nvdimm, new_keyid, NVDIMM_NEW_KEY); - if (!newkey) { + newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid, + NVDIMM_NEW_KEY, &newkey); + if (!newdata) { nvdimm_put_key(key); return -ENOKEY; } - rc = nvdimm->sec.ops->change_key(nvdimm, key ? key_data(key) : NULL, - key_data(newkey), pass_type); + rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type); dev_dbg(dev, "key: %d %d update%s: %s\n", key_serial(key), key_serial(newkey), pass_type == NVDIMM_MASTER ? "(master)" : "(user)", @@ -286,8 +314,9 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid, { struct device *dev = &nvdimm->dev; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - struct key *key; + struct key *key = NULL; int rc; + const void *data; /* The bus lock should be held at the top level of the call stack */ lockdep_assert_held(&nvdimm_bus->reconfig_mutex); @@ -319,11 +348,12 @@ int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid, return -EOPNOTSUPP; } - key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); - if (!key) + data = nvdimm_get_user_key_payload(nvdimm, keyid, + NVDIMM_BASE_KEY, &key); + if (!data) return -ENOKEY; - rc = nvdimm->sec.ops->erase(nvdimm, key_data(key), pass_type); + rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type); dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key), pass_type == NVDIMM_MASTER ? "(master)" : "(user)", rc == 0 ? "success" : "fail"); @@ -337,8 +367,9 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) { struct device *dev = &nvdimm->dev; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - struct key *key; + struct key *key = NULL; int rc; + const void *data; /* The bus lock should be held at the top level of the call stack */ lockdep_assert_held(&nvdimm_bus->reconfig_mutex); @@ -368,15 +399,12 @@ int nvdimm_security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) return -EBUSY; } - if (keyid == 0) - key = NULL; - else { - key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY); - if (!key) - return -ENOKEY; - } + data = nvdimm_get_user_key_payload(nvdimm, keyid, + NVDIMM_BASE_KEY, &key); + if (!data) + return -ENOKEY; - rc = nvdimm->sec.ops->overwrite(nvdimm, key ? key_data(key) : NULL); + rc = nvdimm->sec.ops->overwrite(nvdimm, data); dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key), rc == 0 ? 
"success" : "fail"); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 470601980794..2c43e12b70af 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -288,7 +288,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved) "Cancelling I/O %d", req->tag); nvme_req(req)->status = NVME_SC_ABORT_REQ; - blk_mq_complete_request(req); + blk_mq_complete_request_sync(req); return true; } EXPORT_SYMBOL_GPL(nvme_cancel_request); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index f3b9d91ba0df..6d8451356eac 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1845,7 +1845,7 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) memset(queue, 0, sizeof(*queue)); queue->ctrl = ctrl; queue->qnum = idx; - atomic_set(&queue->csn, 1); + atomic_set(&queue->csn, 0); queue->dev = ctrl->dev; if (idx > 0) @@ -1887,7 +1887,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue) */ queue->connection_id = 0; - atomic_set(&queue->csn, 1); + atomic_set(&queue->csn, 0); } static void @@ -2183,7 +2183,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, { struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; struct nvme_command *sqe = &cmdiu->sqe; - u32 csn; int ret, opstate; /* @@ -2198,8 +2197,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, /* format the FC-NVME CMD IU and fcp_req */ cmdiu->connection_id = cpu_to_be64(queue->connection_id); - csn = atomic_inc_return(&queue->csn); - cmdiu->csn = cpu_to_be32(csn); cmdiu->data_len = cpu_to_be32(data_len); switch (io_dir) { case NVMEFC_FCP_WRITE: @@ -2257,11 +2254,24 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, if (!(op->flags & FCOP_FLAGS_AEN)) blk_mq_start_request(op->rq); + cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, &ctrl->rport->remoteport, queue->lldd_handle, &op->fcp_req); if (ret) { + /* + * If the lld fails to send the command is there an issue with + * the csn value? If the command that fails is the Connect, + * no - as the connection won't be live. If it is a command + * post-connect, it's possible a gap in csn may be created. + * Does this matter? As Linux initiators don't send fused + * commands, no. The gap would exist, but as there's nothing + * that depends on csn order to be delivered on the target + * side, it shouldn't hurt. It would be difficult for a + * target to even detect the csn gap as it has no idea when the + * cmd with the csn was supposed to arrive. 
+ */ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 76250181fee0..9f72d515fc4b 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd) return len; } +u64 nvmet_get_log_page_offset(struct nvme_command *cmd) +{ + return le64_to_cpu(cmd->get_log_page.lpo); +} + static void nvmet_execute_get_log_page_noop(struct nvmet_req *req) { nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len)); diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index c872b47a88f3..33ed95e72d6b 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -131,54 +131,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); } +static size_t discovery_log_entries(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_subsys_link *p; + struct nvmet_port *r; + size_t entries = 0; + + list_for_each_entry(p, &req->port->subsystems, entry) { + if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn)) + continue; + entries++; + } + list_for_each_entry(r, &req->port->referrals, entry) + entries++; + return entries; +} + static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) { const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry); struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmf_disc_rsp_page_hdr *hdr; + u64 offset = nvmet_get_log_page_offset(req->cmd); size_t data_len = nvmet_get_log_page_len(req->cmd); - size_t alloc_len = max(data_len, sizeof(*hdr)); - int residual_len = data_len - sizeof(*hdr); + size_t alloc_len; struct nvmet_subsys_link *p; struct nvmet_port *r; u32 numrec = 0; u16 status = 0; + void *buffer; + + /* Spec requires dword aligned offsets */ + if (offset & 0x3) { + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + goto out; + } /* * Make sure we're passing at least a buffer of response header size. * If host provided data len is less than the header size, only the * number of bytes requested by host will be sent to host. 
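The reworked discovery handler in this hunk validates the host-supplied log page offset (dword alignment) and sizes the response buffer from the real entry count instead of the requested length. A standalone sketch of those two checks; the header size, entry size and entry count below are placeholders for the example, not the NVMe-defined values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HDR_SIZE    1024u   /* placeholder header size */
#define ENTRY_SIZE  1024u   /* placeholder entry size  */

int main(void)
{
	uint64_t offset = 4;   /* host-requested offset into the log page */
	size_t entries = 3;    /* number of discovery entries we can report */

	/* The spec requires dword-aligned offsets, so reject anything else. */
	if (offset & 0x3) {
		fprintf(stderr, "invalid offset %llu\n", (unsigned long long)offset);
		return EXIT_FAILURE;
	}

	/* Size the buffer from the actual entry count, then serve from 'offset'. */
	size_t alloc_len = HDR_SIZE + ENTRY_SIZE * entries;
	unsigned char *buffer = calloc(1, alloc_len);
	if (!buffer)
		return EXIT_FAILURE;

	printf("allocated %zu bytes, response copied from buffer + %llu\n",
	       alloc_len, (unsigned long long)offset);
	free(buffer);
	return 0;
}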
*/ - hdr = kzalloc(alloc_len, GFP_KERNEL); - if (!hdr) { + down_read(&nvmet_config_sem); + alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req); + buffer = kzalloc(alloc_len, GFP_KERNEL); + if (!buffer) { + up_read(&nvmet_config_sem); status = NVME_SC_INTERNAL; goto out; } - down_read(&nvmet_config_sem); + hdr = buffer; list_for_each_entry(p, &req->port->subsystems, entry) { + char traddr[NVMF_TRADDR_SIZE]; + if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn)) continue; - if (residual_len >= entry_size) { - char traddr[NVMF_TRADDR_SIZE]; - - nvmet_set_disc_traddr(req, req->port, traddr); - nvmet_format_discovery_entry(hdr, req->port, - p->subsys->subsysnqn, traddr, - NVME_NQN_NVME, numrec); - residual_len -= entry_size; - } + + nvmet_set_disc_traddr(req, req->port, traddr); + nvmet_format_discovery_entry(hdr, req->port, + p->subsys->subsysnqn, traddr, + NVME_NQN_NVME, numrec); numrec++; } list_for_each_entry(r, &req->port->referrals, entry) { - if (residual_len >= entry_size) { - nvmet_format_discovery_entry(hdr, r, - NVME_DISC_SUBSYS_NAME, - r->disc_addr.traddr, - NVME_NQN_DISC, numrec); - residual_len -= entry_size; - } + nvmet_format_discovery_entry(hdr, r, + NVME_DISC_SUBSYS_NAME, + r->disc_addr.traddr, + NVME_NQN_DISC, numrec); numrec++; } @@ -190,8 +212,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) up_read(&nvmet_config_sem); - status = nvmet_copy_to_sgl(req, 0, hdr, data_len); - kfree(hdr); + status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len); + kfree(buffer); out: nvmet_req_complete(req, status); } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 51e49efd7849..1653d19b187f 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len); u32 nvmet_get_log_page_len(struct nvme_command *cmd); +u64 nvmet_get_log_page_offset(struct nvme_command *cmd); extern struct list_head *nvmet_ports; void nvmet_port_disc_changed(struct nvmet_port *port, diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 1be571c20062..6bad04cbb1d3 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -157,8 +157,12 @@ #define DBG_IRT(x...) #endif +#ifdef CONFIG_64BIT +#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa)) +#else #define COMPARE_IRTE_ADDR(irte, hpa) \ - ((irte)->dest_iosapic_addr == F_EXTEND(hpa)) + ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL)) +#endif #define IOSAPIC_REG_SELECT 0x00 #define IOSAPIC_REG_WINDOW 0x10 diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 3f3df4c29f6e..905282a8ddaa 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal) * removed from the slot/adapter. 
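The rest of this remove_board() hunk, continuing below, masks off the link-change and presence-change bits that the power-off itself is expected to raise. The AND-with-complement pattern it relies on can be shown on its own with C11 atomics; the EVT_* bit values here are placeholders, not the real PCI_EXP_SLTSTA_* masks.

#include <stdatomic.h>
#include <stdio.h>

#define EVT_DLLSC 0x01u   /* placeholder: data link layer state changed */
#define EVT_PDC   0x02u   /* placeholder: presence detect changed       */
#define EVT_OTHER 0x10u   /* placeholder: some unrelated pending event  */

int main(void)
{
	atomic_uint pending = EVT_DLLSC | EVT_PDC | EVT_OTHER;

	/* Atomically clear only the bits the power-off is expected to raise. */
	atomic_fetch_and(&pending, ~(EVT_DLLSC | EVT_PDC));

	printf("pending events after mask: 0x%02x\n", atomic_load(&pending)); /* 0x10 */
	return 0;
}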
*/ msleep(1000); + + /* Ignore link or presence changes caused by power off */ + atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC), + &ctrl->pending_events); } /* turn off Green LED */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a59ad09ce911..a077f67fe1da 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, quirk_dma_func1_alias); diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c index 8f018b3f3cd4..c7039f52ad51 100644 --- a/drivers/platform/x86/pmc_atom.c +++ b/drivers/platform/x86/pmc_atom.c @@ -17,6 +17,7 @@ #include <linux/debugfs.h> #include <linux/device.h> +#include <linux/dmi.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_data/x86/clk-pmc-atom.h> @@ -391,11 +392,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc) } #endif /* CONFIG_DEBUG_FS */ +/* + * Some systems need one or more of their pmc_plt_clks to be + * marked as critical. + */ +static const struct dmi_system_id critclk_systems[] = { + { + .ident = "MPL CEC1x", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"), + DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"), + }, + }, + { /*sentinel*/ } +}; + static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, const struct pmc_data *pmc_data) { struct platform_device *clkdev; struct pmc_clk_data *clk_data; + const struct dmi_system_id *d = dmi_first_match(critclk_systems); clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); if (!clk_data) @@ -403,6 +420,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap, clk_data->base = pmc_regmap; /* offset is added by client */ clk_data->clks = pmc_data->clks; + if (d) { + clk_data->critical = true; + pr_info("%s critclks quirk enabled\n", d->ident); + } clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom", PLATFORM_DEVID_NONE, diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c index ad969d9fc981..c2644a9fe80f 100644 --- a/drivers/power/supply/goldfish_battery.c +++ b/drivers/power/supply/goldfish_battery.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL +// SPDX-License-Identifier: GPL-2.0 /* * Power supply driver for the goldfish emulator * diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c index 91751617b37a..c53a2185a039 100644 --- a/drivers/reset/reset-meson-audio-arb.c +++ b/drivers/reset/reset-meson-audio-arb.c @@ -130,6 +130,7 @@ static int meson_audio_arb_probe(struct platform_device *pdev) arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits); arb->rstc.ops = &meson_audio_arb_rstc_ops; arb->rstc.of_node = dev->of_node; + arb->rstc.owner = THIS_MODULE; /* * Enable general : diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index a71734c41693..f933c06bff4f 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -667,9 +667,9 @@ config RTC_DRV_S5M will be called rtc-s5m. 
config RTC_DRV_SD3078 - tristate "ZXW Crystal SD3078" + tristate "ZXW Shenzhen whwave SD3078" help - If you say yes here you get support for the ZXW Crystal + If you say yes here you get support for the ZXW Shenzhen whwave SD3078 RTC chips. This driver can also be built as a module. If so, the module diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c index e5444296075e..4d6bf9304ceb 100644 --- a/drivers/rtc/rtc-cros-ec.c +++ b/drivers/rtc/rtc-cros-ec.c @@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev) struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); if (device_may_wakeup(dev)) - enable_irq_wake(cros_ec_rtc->cros_ec->irq); + return enable_irq_wake(cros_ec_rtc->cros_ec->irq); return 0; } @@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev) struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); if (device_may_wakeup(dev)) - disable_irq_wake(cros_ec_rtc->cros_ec->irq); + return disable_irq_wake(cros_ec_rtc->cros_ec->irq); return 0; } diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index b4e054c64bad..69b54e5556c0 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c @@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev) da9063_data_to_tm(data, &rtc->alarm_time, rtc); rtc->rtc_sync = false; + /* + * TODO: some models have alarms on a minute boundary but still support + * real hardware interrupts. Add this once the core supports it. + */ + if (config->rtc_data_start != RTC_SEC) + rtc->rtc_dev->uie_unsupported = 1; + irq_alarm = platform_get_irq_byname(pdev, "ALARM"); ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, da9063_alarm_event, diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index d417b203cbc5..1d3de2a3d1a4 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) { unsigned int byte; - int value = 0xff; /* return 0xff for ignored values */ + int value = -1; /* return -1 for ignored values */ byte = readb(rtc->regbase + reg_off); if (byte & AR_ENB) { diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 6e294b4d3635..f89f9d02e788 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); raw: - block->blocks = (private->real_cyl * + block->blocks = ((unsigned long) private->real_cyl * private->rdc_data.trk_per_cyl * blk_per_trk); dev_info(&device->cdev->dev, - "DASD with %d KB/block, %d KB total size, %d KB/track, " + "DASD with %u KB/block, %lu KB total size, %u KB/track, " "%s\n", (block->bp_block >> 10), - ((private->real_cyl * + (((unsigned long) private->real_cyl * private->rdc_data.trk_per_cyl * blk_per_trk * (block->bp_block >> 9)) >> 1), ((blk_per_trk * block->bp_block) >> 10), diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index fd2146bcc0ad..e17364e13d2f 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -629,7 +629,7 @@ con3270_init(void) (void (*)(unsigned long)) con3270_read_tasklet, (unsigned long) condev->read); - raw3270_add_view(&condev->view, &con3270_fn, 1); + raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ); INIT_LIST_HEAD(&condev->freemem); for (i = 0; i < CON3270_STRING_PAGES; i++) 
{ diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 8f3a2eeb28dc..8b48ba9c598e 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp) init_waitqueue_head(&fp->wait); fp->fs_pid = get_pid(task_pid(current)); - rc = raw3270_add_view(&fp->view, &fs3270_fn, minor); + rc = raw3270_add_view(&fp->view, &fs3270_fn, minor, + RAW3270_VIEW_LOCK_BH); if (rc) { fs3270_free_view(&fp->view); goto out; diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index f8cd2935fbfd..63a41b168761 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view) * Add view to device with minor "minor". */ int -raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) +raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass) { unsigned long flags; struct raw3270 *rp; @@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) view->cols = rp->cols; view->ascebc = rp->ascebc; spin_lock_init(&view->lock); + lockdep_set_subclass(&view->lock, subclass); list_add(&view->list, &rp->view_list); rc = 0; spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index 114ca7cbf889..3afaa35f7351 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -150,6 +150,8 @@ struct raw3270_fn { struct raw3270_view { struct list_head list; spinlock_t lock; +#define RAW3270_VIEW_LOCK_IRQ 0 +#define RAW3270_VIEW_LOCK_BH 1 atomic_t ref_count; struct raw3270 *dev; struct raw3270_fn *fn; @@ -158,7 +160,7 @@ struct raw3270_view { unsigned char *ascebc; /* ascii -> ebcdic table */ }; -int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int); +int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int); int raw3270_activate_view(struct raw3270_view *); void raw3270_del_view(struct raw3270_view *); void raw3270_deactivate_view(struct raw3270_view *); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 2b0c36c2c568..98d7fc152e32 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) return PTR_ERR(tp); rc = raw3270_add_view(&tp->view, &tty3270_fn, - tty->index + RAW3270_FIRSTMINOR); + tty->index + RAW3270_FIRSTMINOR, + RAW3270_VIEW_LOCK_BH); if (rc) { tty3270_free_view(tp); return rc; diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 6a340f2c3556..5ea83dc4f1d7 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -751,8 +751,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq) __ap_flush_queue(aq); /* set REMOVE state to prevent new messages are queued in */ aq->state = AP_STATE_REMOVE; - del_timer_sync(&aq->timeout); spin_unlock_bh(&aq->lock); + del_timer_sync(&aq->timeout); } void ap_queue_remove(struct ap_queue *aq) diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 3e85d665c572..45eb0c14b880 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -51,7 +51,8 @@ static debug_info_t *debug_info; static void __init pkey_debug_init(void) { - debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long)); + /* 5 arguments per dbf entry (including the format string ptr) 
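The pkey change that follows sizes each s390 debug entry as 5 * sizeof(long): one slot for the format string pointer plus up to four formatted arguments, as the comment above states. The same arithmetic spelled out as a sketch (the four-argument figure comes from that comment; nothing else is derived here):

#include <stdio.h>

int main(void)
{
	const size_t max_args = 4;                     /* formatted arguments per entry */
	const size_t slots    = 1 /* format ptr */ + max_args;
	size_t entry_size     = slots * sizeof(long);

	/* On a 64-bit build this is 5 * 8 = 40 bytes per debug entry. */
	printf("per-entry buffer: %zu bytes\n", entry_size);
	return 0;
}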
*/ + debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); debug_register_view(debug_info, &debug_sprintf_view); debug_set_level(debug_info, 3); } diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c index 3d401d02c019..bdd177e3d762 100644 --- a/drivers/scsi/aic7xxx/aic7770_osm.c +++ b/drivers/scsi/aic7xxx/aic7770_osm.c @@ -91,6 +91,7 @@ aic7770_probe(struct device *dev) ahc = ahc_alloc(&aic7xxx_driver_template, name); if (ahc == NULL) return (ENOMEM); + ahc->dev = dev; error = aic7770_config(ahc, aic7770_ident_table + edev->id.driver_data, eisaBase); if (error != 0) { diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index 5614921b4041..88b90f9806c9 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -943,6 +943,7 @@ struct ahc_softc { * Platform specific device information. */ ahc_dev_softc_t dev_softc; + struct device *dev; /* * Bus specific device information. diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index 3c9c17450bb3..d5c4a0d23706 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -860,8 +860,8 @@ int ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { - *vaddr = pci_alloc_consistent(ahc->dev_softc, - dmat->maxsize, mapp); + /* XXX: check if we really need the GFP_ATOMIC and unwind this mess! */ + *vaddr = dma_alloc_coherent(ahc->dev, dmat->maxsize, mapp, GFP_ATOMIC); if (*vaddr == NULL) return ENOMEM; return 0; @@ -871,8 +871,7 @@ void ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat, void* vaddr, bus_dmamap_t map) { - pci_free_consistent(ahc->dev_softc, dmat->maxsize, - vaddr, map); + dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map); } int @@ -1123,8 +1122,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa host->transportt = ahc_linux_transport_template; - retval = scsi_add_host(host, - (ahc->dev_softc ? 
&ahc->dev_softc->dev : NULL)); + retval = scsi_add_host(host, ahc->dev); if (retval) { printk(KERN_WARNING "aic7xxx: scsi_add_host failed\n"); scsi_host_put(host); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index 0fc14dac7070..717d8d1082ce 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -250,6 +250,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } ahc->dev_softc = pci; + ahc->dev = &pci->dev; error = ahc_pci_config(ahc, entry); if (error != 0) { ahc_free(ahc); diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 462560b2855e..469d0bc9f5fe 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) } out: - if (req->nsge > 0) + if (req->nsge > 0) { scsi_dma_unmap(cmnd); + if (req->dcopy && (host_status == DID_OK)) + host_status = csio_scsi_copy_to_sgl(hw, req); + } cmnd->result = (((host_status) << 16) | scsi_status); cmnd->scsi_done(cmnd); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index dfba4921b265..5bf61431434b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -2162,7 +2162,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", fc_rport_state(rdata)); - rdata->flags &= ~FC_RP_STARTED; fc_rport_enter_delete(rdata, RPORT_EV_STOP); mutex_unlock(&rdata->rp_mutex); kref_put(&rdata->kref, fc_rport_destroy); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c98f264f1d83..a497b2c0cb79 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, * wake up the thread. */ spin_lock(&lpfc_cmd->buf_lock); - if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) { - lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; - if (lpfc_cmd->waitq) - wake_up(lpfc_cmd->waitq); + lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; + if (lpfc_cmd->waitq) { + wake_up(lpfc_cmd->waitq); lpfc_cmd->waitq = NULL; } spin_unlock(&lpfc_cmd->buf_lock); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e74a62448ba4..e5db9a9954dc 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) { - struct qedi_nvm_iscsi_image nvm_image; - qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, - sizeof(nvm_image), + sizeof(struct qedi_nvm_iscsi_image), &qedi->nvm_buf_dma, GFP_KERNEL); if (!qedi->iscsi_image) { QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); @@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data) static int qedi_get_boot_info(struct qedi_ctx *qedi) { int ret = 1; - struct qedi_nvm_iscsi_image nvm_image; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Get NVM iSCSI CFG image\n"); ret = qedi_ops->common->nvm_get_image(qedi->cdev, QED_NVM_IMAGE_ISCSI_CFG, (char *)qedi->iscsi_image, - sizeof(nvm_image)); + sizeof(struct qedi_nvm_iscsi_image)); if (ret) QEDI_ERR(&qedi->dbg_ctx, "Could not get NVM image. 
ret = %d\n", ret);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c4cbfd07b916..a08ff3bd6310 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -238,6 +238,7 @@ static struct {
 	{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+	{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
 	{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
 	{"SONY", "TSL", NULL, BLIST_FORCELUN},	/* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 5a58cbf3a75d..c14006ac98f9 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
 	{"NETAPP", "INF-01-00",		"rdac", },
 	{"LSI", "INF-01-00",		"rdac", },
 	{"ENGENIO", "INF-01-00",	"rdac", },
+	{"LENOVO", "DE_Series",		"rdac", },
 	{NULL, NULL,			NULL },
 };
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 601b9f1de267..07dfc17d4824 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1706,8 +1706,12 @@ out_put_budget:
 		ret = BLK_STS_DEV_RESOURCE;
 		break;
 	default:
+		if (unlikely(!scsi_device_online(sdev)))
+			scsi_req(req)->result = DID_NO_CONNECT << 16;
+		else
+			scsi_req(req)->result = DID_ERROR << 16;
 		/*
-		 * Make sure to release all allocated ressources when
+		 * Make sure to release all allocated resources when
 		 * we hit an error, as we will never see this command
 		 * again.
 		 */
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 84380bae20f1..8472de1007ff 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -385,7 +385,7 @@ enum storvsc_request_type {
  * This is the end of Protocol specific defines.
  */
 
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
 static u32 max_outstanding_req_per_channel;
 
 static int storvsc_vcpus_per_sub_channel = 4;
@@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
 	struct device *dev = &device->device;
 	struct storvsc_device *stor_device;
-	int num_cpus = num_online_cpus();
 	int num_sc;
 	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
 	int ret, t;
 
-	num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+	/*
+	 * If the number of CPUs is artificially restricted, such as
+	 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+	 * sub-channels >= the number of CPUs. These sub-channels
+	 * should not be created. The primary channel is already created
+	 * and assigned to one CPU, so check against # CPUs - 1.
+	 */
+	num_sc = min((int)(num_online_cpus() - 1), max_chns);
+	if (!num_sc)
+		return;
+
 	stor_device = get_out_stor_device(device);
 	if (!stor_device)
 		return;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8af01777d09c..f8cb7c23305b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -793,6 +793,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 	/* We need to know how many queues before we allocate. */
 	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
+	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
 
 	num_targets = virtscsi_config_get(vdev, max_target) + 1;
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 808ed92ed66f..1bb1cb651349 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
 	size = usb_endpoint_maxp(devpriv->ep_tx);
 	devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-	if (!devpriv->usb_tx_buf) {
-		kfree(devpriv->usb_rx_buf);
+	if (!devpriv->usb_tx_buf)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
 	if (!devpriv)
 		return -ENOMEM;
 
+	mutex_init(&devpriv->mut);
+	usb_set_intfdata(intf, devpriv);
+
 	ret = ni6501_find_endpoints(dev);
 	if (ret)
 		return ret;
@@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
 	if (ret)
 		return ret;
 
-	mutex_init(&devpriv->mut);
-	usb_set_intfdata(intf, devpriv);
-
 	ret = comedi_alloc_subdevices(dev, 2);
 	if (ret)
 		return ret;
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 6234b649d887..65dc6c51037e 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
 	size = usb_endpoint_maxp(devpriv->ep_tx);
 	devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
-	if (!devpriv->usb_tx_buf) {
-		kfree(devpriv->usb_rx_buf);
+	if (!devpriv->usb_tx_buf)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
@@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
 	devpriv->model = board->model;
 
+	sema_init(&devpriv->limit_sem, 8);
+
 	ret = vmk80xx_find_usb_endpoints(dev);
 	if (ret)
 		return ret;
@@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
 	if (ret)
 		return ret;
 
-	sema_init(&devpriv->limit_sem, 8);
-
 	usb_set_intfdata(intf, devpriv);
 
 	if (devpriv->model == VMK8055_MODEL)
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index 526e0dbea5b5..81af768e7248 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -298,7 +298,7 @@ submit_bio_retry:
 		*last_block = current_block;
 
 		/* shift in advance in case of it followed by too many gaps */
-		if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
+		if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
 			/* err should reassign to 0 after submitting */
 			err = 0;
 			goto submit_bio_out;
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index acdbc07fd259..2fc8bc22b57b 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -109,10 +109,10 @@
 #define AD7192_CH_AIN3		BIT(6) /* AIN3 - AINCOM */
 #define AD7192_CH_AIN4		BIT(7) /* AIN4 - AINCOM */
 
-#define AD7193_CH_AIN1P_AIN2M	0x000 /* AIN1(+) - AIN2(-) */
-#define AD7193_CH_AIN3P_AIN4M	0x001 /* AIN3(+) - AIN4(-) */
-#define AD7193_CH_AIN5P_AIN6M	0x002 /* AIN5(+) - AIN6(-) */
-#define AD7193_CH_AIN7P_AIN8M	0x004 /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_AIN1P_AIN2M	0x001 /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M	0x002 /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M	0x004 /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M	0x008 /* AIN7(+) - AIN8(-) */
 #define AD7193_CH_TEMP		0x100 /* Temp senseor */
 #define AD7193_CH_AIN2P_AIN2M	0x200 /* AIN2(+) - AIN2(-) */
 #define AD7193_CH_AIN1		0x401 /* AIN1 - AINCOM */
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 029c3bf42d4d..07774c000c5a 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
 static IIO_DEV_ATTR_IPEAK(0644,
 		ade7854_read_32bit,
 		ade7854_write_32bit,
-		ADE7854_VPEAK);
+		ADE7854_IPEAK);
 static IIO_DEV_ATTR_APHCAL(0644,
 		ade7854_read_16bit,
 		ade7854_write_16bit,
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 18936cdb1083..956daf8c3bd2 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -1431,7 +1431,7 @@ int most_register_interface(struct most_interface *iface)
 
 	INIT_LIST_HEAD(&iface->p->channel_list);
 	iface->p->dev_id = id;
-	snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
+	strcpy(iface->p->name, iface->description);
 	iface->dev.init_name = iface->p->name;
 	iface->dev.bus = &mc.bus;
 	iface->dev.parent = &mc.dev;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 09a183dfc526..a31db15cd7c0 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1520,11 +1520,13 @@ static int __init sc16is7xx_init(void)
 #endif
 	return ret;
 
+#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
 err_spi:
+#endif
 #ifdef CONFIG_SERIAL_SC16IS7XX_I2C
 	i2c_del_driver(&sc16is7xx_i2c_uart_driver);
-#endif
 err_i2c:
+#endif
 	uart_unregister_driver(&sc16is7xx_uart);
 	return ret;
 }
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 2d1c626312cd..3cd139752d3f 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2512,14 +2512,16 @@ done:
 			 * center of the last stop bit in sampling clocks.
 			 */
 			int last_stop = bits * 2 - 1;
-			int deviation = min_err * srr * last_stop / 2 / baud;
+			int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
+							  (int)(srr + 1),
+							  2 * (int)baud);
 
 			if (abs(deviation) >= 2) {
 				/* At least two sampling clocks off at the
 				 * last stop bit; we can increase the error
 				 * margin by shifting the sampling point.
 				 */
-				int shift = min(-8, max(7, deviation / 2));
+				int shift = clamp(deviation / 2, -8, 7);
 
 				hssrr |= (shift << HSCIF_SRHP_SHIFT) &
					 HSCIF_SRHP_MASK;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index d34984aa646d..650c66886c80 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1520,7 +1520,8 @@ static void csi_J(struct vc_data *vc, int vpar)
 			return;
 	}
 	scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
-	update_region(vc, (unsigned long) start, count);
+	if (con_should_update(vc))
+		do_update_region(vc, (unsigned long) start, count);
 	vc->vc_need_wrap = 0;
 }
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index a25659b5a5d1..3fa20e95a6bb 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
 		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
 				   subvendor, subdevice, class, class_mask, 0);
 		if (rc)
-			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
 				vendor, device, subvendor, subdevice,
 				class, class_mask, rc);
 		else
-			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
 				vendor, device, subvendor, subdevice,
 				class, class_mask);
 	}
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 8dbb270998f4..6b64e45a5269 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1398,7 +1398,7 @@ unlock_exit:
 	mutex_unlock(&container->lock);
 }
 
-const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
+static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
 	.name = "iommu-vfio-powerpc",
 	.owner = THIS_MODULE,
 	.open = tce_iommu_open,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 73652e21efec..d0f731c9920a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+		 "Maximum number of user DMA mappings per container (65535).");
+
 struct vfio_iommu {
 	struct list_head	domain_list;
 	struct vfio_domain	*external_domain; /* domain for external user */
 	struct mutex		lock;
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
+	unsigned int		dma_avail;
 	bool			v2;
 	bool			nesting;
 };
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	vfio_unlink_dma(iommu, dma);
 	put_task_struct(dma->task);
 	kfree(dma);
+	iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		goto out_unlock;
 	}
 
+	if (!iommu->dma_avail) {
+		ret = -ENOSPC;
+		goto out_unlock;
+	}
+
 	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
 	if (!dma) {
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
 
+	iommu->dma_avail--;
 	dma->iova = iova;
 	dma->vaddr = vaddr;
 	dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 
 	INIT_LIST_HEAD(&iommu->domain_list);
 	iommu->dma_list = RB_ROOT;
+	iommu->dma_avail = dma_entry_limit;
 	mutex_init(&iommu->lock);
 	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5ace833de746..351af88231ad 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
 				u64 start, u64 size, u64 end,
 				u64 userspace_addr, int perm)
 {
-	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+	struct vhost_umem_node *tmp, *node;
 
+	if (!size)
+		return -EFAULT;
+
+	node = kmalloc(sizeof(*node), GFP_ATOMIC);
 	if (!node)
 		return -ENOMEM;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index d0584c040c60..7a0398bb84f7 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -255,9 +255,11 @@ void vp_del_vqs(struct virtio_device *vdev)
 	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
 		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
 
-	for (i = 0; i < vp_dev->msix_vectors; i++)
-		if (vp_dev->msix_affinity_masks[i])
-			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+	if (vp_dev->msix_affinity_masks) {
+		for (i = 0; i < vp_dev->msix_vectors; i++)
+			if (vp_dev->msix_affinity_masks[i])
+				free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+	}
 
 	if (vp_dev->msix_enabled) {
 		/* Disable the vector used for configuration */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 18846afb39da..5df92c308286 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -882,6 +882,8 @@ static struct virtqueue *vring_create_virtqueue_split(
 					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
 		if (queue)
 			break;
+		if (!may_reduce_num)
+			return NULL;
 	}
 
 	if (!num)
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index de01a6d0059d..a1c61e351d3f 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
-			   GFP_KERNEL);
+	vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
 	if (!vma_priv)
 		return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c3e201025ef0..0782ff3c2273 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 	if (xen_store_evtchn == 0)
 		return -ENOENT;
 
-	nonseekable_open(inode, filp);
-
-	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+	stream_open(inode, filp);
 
 	u = kzalloc(sizeof(*u), GFP_KERNEL);
 	if (u == NULL)