Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/nbd.c  117
-rw-r--r--  drivers/block/null_blk/main.c  25
-rw-r--r--  drivers/dpll/dpll_netlink.c  17
-rw-r--r--  drivers/hid/hid-apple.c  2
-rw-r--r--  drivers/hid/hid-asus.c  27
-rw-r--r--  drivers/hid/hid-core.c  12
-rw-r--r--  drivers/hid/hid-debug.c  3
-rw-r--r--  drivers/hid/hid-glorious.c  16
-rw-r--r--  drivers/hid/hid-ids.h  12
-rw-r--r--  drivers/hid/hid-logitech-dj.c  11
-rw-r--r--  drivers/hid/hid-mcp2221.c  4
-rw-r--r--  drivers/hid/hid-multitouch.c  5
-rw-r--r--  drivers/hid/hid-quirks.c  1
-rw-r--r--  drivers/md/bcache/bcache.h  1
-rw-r--r--  drivers/md/bcache/btree.c  11
-rw-r--r--  drivers/md/bcache/super.c  4
-rw-r--r--  drivers/md/bcache/sysfs.c  2
-rw-r--r--  drivers/md/bcache/writeback.c  24
-rw-r--r--  drivers/md/md.c  3
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c  14
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c  11
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c  14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c  144
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h  5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h  1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c  20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c  2
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c  4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig  2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c  8
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_main.c  4
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_main.c  4
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c  2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c  66
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v5.0.c  2
-rw-r--r--  drivers/net/netkit.c  22
-rw-r--r--  drivers/net/usb/aqc111.c  8
-rw-r--r--  drivers/net/usb/ax88179_178a.c  4
-rw-r--r--  drivers/net/usb/qmi_wwan.c  1
-rw-r--r--  drivers/net/veth.c  46
-rw-r--r--  drivers/net/vrf.c  38
-rw-r--r--  drivers/net/wireguard/device.c  4
-rw-r--r--  drivers/net/wireguard/receive.c  12
-rw-r--r--  drivers/net/wireguard/send.c  3
-rw-r--r--  drivers/nfc/virtual_ncidev.c  7
-rw-r--r--  drivers/nvme/host/auth.c  5
-rw-r--r--  drivers/nvme/host/core.c  21
-rw-r--r--  drivers/nvme/host/fabrics.c  2
-rw-r--r--  drivers/nvme/host/fc.c  19
-rw-r--r--  drivers/nvme/host/rdma.c  1
-rw-r--r--  drivers/nvme/host/tcp.c  32
-rw-r--r--  drivers/nvme/target/Kconfig  4
-rw-r--r--  drivers/nvme/target/configfs.c  2
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c  4
-rw-r--r--  drivers/nvme/target/tcp.c  4
-rw-r--r--  drivers/platform/x86/amd/pmc/pmc.c  31
-rw-r--r--  drivers/platform/x86/hp/hp-bioscfg/bioscfg.c  26
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c  11
-rw-r--r--  drivers/platform/x86/intel/telemetry/core.c  4
-rw-r--r--  drivers/s390/block/dasd.c  24
-rw-r--r--  drivers/s390/block/dasd_int.h  2
-rw-r--r--  drivers/s390/net/Kconfig  3
-rw-r--r--  drivers/s390/net/ism_drv.c  93
66 files changed, 611 insertions(+), 453 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 855fdf5c3b4e..b6414e1e645b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -67,6 +67,7 @@ struct nbd_sock {
struct recv_thread_args {
struct work_struct work;
struct nbd_device *nbd;
+ struct nbd_sock *nsock;
int index;
};
@@ -395,6 +396,22 @@ static u32 req_to_nbd_cmd_type(struct request *req)
}
}
+static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
+{
+ if (refcount_inc_not_zero(&nbd->config_refs)) {
+ /*
+ * Add smp_mb__after_atomic to ensure that reading nbd->config_refs
+ * and reading nbd->config is ordered. The pair is the barrier in
+ * nbd_alloc_and_init_config(), avoid nbd->config_refs is set
+ * before nbd->config.
+ */
+ smp_mb__after_atomic();
+ return nbd->config;
+ }
+
+ return NULL;
+}
+
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -409,13 +426,13 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
return BLK_EH_DONE;
}
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
+ config = nbd_get_config_unlocked(nbd);
+ if (!config) {
cmd->status = BLK_STS_TIMEOUT;
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
mutex_unlock(&cmd->lock);
goto done;
}
- config = nbd->config;
if (config->num_connections > 1 ||
(config->num_connections == 1 && nbd->tag_set.timeout)) {
@@ -489,15 +506,9 @@ done:
return BLK_EH_DONE;
}
-/*
- * Send or receive packet. Return a positive value on success and
- * negtive value on failue, and never return 0.
- */
-static int sock_xmit(struct nbd_device *nbd, int index, int send,
- struct iov_iter *iter, int msg_flags, int *sent)
+static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
+ struct iov_iter *iter, int msg_flags, int *sent)
{
- struct nbd_config *config = nbd->config;
- struct socket *sock = config->socks[index]->sock;
int result;
struct msghdr msg;
unsigned int noreclaim_flag;
@@ -541,6 +552,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
}
/*
+ * Send or receive packet. Return a positive value on success and
+ * negtive value on failure, and never return 0.
+ */
+static int sock_xmit(struct nbd_device *nbd, int index, int send,
+ struct iov_iter *iter, int msg_flags, int *sent)
+{
+ struct nbd_config *config = nbd->config;
+ struct socket *sock = config->socks[index]->sock;
+
+ return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
+}
+
+/*
* Different settings for sk->sk_sndtimeo can result in different return values
* if there is a signal pending when we enter sendmsg, because reasons?
*/
@@ -696,7 +720,7 @@ out:
return 0;
}
-static int nbd_read_reply(struct nbd_device *nbd, int index,
+static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
struct nbd_reply *reply)
{
struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
@@ -705,7 +729,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index,
reply->magic = 0;
iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
- result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
+ result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
if (result < 0) {
if (!nbd_disconnected(nbd->config))
dev_err(disk_to_dev(nbd->disk),
@@ -829,14 +853,14 @@ static void recv_work(struct work_struct *work)
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct request_queue *q = nbd->disk->queue;
- struct nbd_sock *nsock;
+ struct nbd_sock *nsock = args->nsock;
struct nbd_cmd *cmd;
struct request *rq;
while (1) {
struct nbd_reply reply;
- if (nbd_read_reply(nbd, args->index, &reply))
+ if (nbd_read_reply(nbd, nsock->sock, &reply))
break;
/*
@@ -871,7 +895,6 @@ static void recv_work(struct work_struct *work)
percpu_ref_put(&q->q_usage_counter);
}
- nsock = config->socks[args->index];
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
@@ -977,12 +1000,12 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
struct nbd_sock *nsock;
int ret;
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
+ config = nbd_get_config_unlocked(nbd);
+ if (!config) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
return -EINVAL;
}
- config = nbd->config;
if (index >= config->num_connections) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -1215,6 +1238,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
INIT_WORK(&args->work, recv_work);
args->index = i;
args->nbd = nbd;
+ args->nsock = nsock;
nsock->cookie++;
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
@@ -1397,6 +1421,7 @@ static int nbd_start_device(struct nbd_device *nbd)
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
args->nbd = nbd;
+ args->nsock = config->socks[i];
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
@@ -1530,17 +1555,20 @@ static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
return error;
}
-static struct nbd_config *nbd_alloc_config(void)
+static int nbd_alloc_and_init_config(struct nbd_device *nbd)
{
struct nbd_config *config;
+ if (WARN_ON(nbd->config))
+ return -EINVAL;
+
if (!try_module_get(THIS_MODULE))
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
if (!config) {
module_put(THIS_MODULE);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
atomic_set(&config->recv_threads, 0);
@@ -1548,12 +1576,24 @@ static struct nbd_config *nbd_alloc_config(void)
init_waitqueue_head(&config->conn_wait);
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
- return config;
+
+ nbd->config = config;
+ /*
+ * Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment,
+ * its pair is the barrier in nbd_get_config_unlocked().
+ * So nbd_get_config_unlocked() won't see nbd->config as null after
+ * refcount_inc_not_zero() succeed.
+ */
+ smp_mb__before_atomic();
+ refcount_set(&nbd->config_refs, 1);
+
+ return 0;
}
static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{
struct nbd_device *nbd;
+ struct nbd_config *config;
int ret = 0;
mutex_lock(&nbd_index_mutex);
@@ -1566,27 +1606,25 @@ static int nbd_open(struct gendisk *disk, blk_mode_t mode)
ret = -ENXIO;
goto out;
}
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
- struct nbd_config *config;
+ config = nbd_get_config_unlocked(nbd);
+ if (!config) {
mutex_lock(&nbd->config_lock);
if (refcount_inc_not_zero(&nbd->config_refs)) {
mutex_unlock(&nbd->config_lock);
goto out;
}
- config = nbd_alloc_config();
- if (IS_ERR(config)) {
- ret = PTR_ERR(config);
+ ret = nbd_alloc_and_init_config(nbd);
+ if (ret) {
mutex_unlock(&nbd->config_lock);
goto out;
}
- nbd->config = config;
- refcount_set(&nbd->config_refs, 1);
+
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state);
- } else if (nbd_disconnected(nbd->config)) {
+ } else if (nbd_disconnected(config)) {
if (max_part)
set_bit(GD_NEED_PART_SCAN, &disk->state);
}
@@ -1990,22 +2028,17 @@ again:
pr_err("nbd%d already in use\n", index);
return -EBUSY;
}
- if (WARN_ON(nbd->config)) {
- mutex_unlock(&nbd->config_lock);
- nbd_put(nbd);
- return -EINVAL;
- }
- config = nbd_alloc_config();
- if (IS_ERR(config)) {
+
+ ret = nbd_alloc_and_init_config(nbd);
+ if (ret) {
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
pr_err("couldn't allocate config\n");
- return PTR_ERR(config);
+ return ret;
}
- nbd->config = config;
- refcount_set(&nbd->config_refs, 1);
- set_bit(NBD_RT_BOUND, &config->runtime_flags);
+ config = nbd->config;
+ set_bit(NBD_RT_BOUND, &config->runtime_flags);
ret = nbd_genl_size_set(info, nbd);
if (ret)
goto out;
@@ -2208,7 +2241,8 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
}
mutex_unlock(&nbd_index_mutex);
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
+ config = nbd_get_config_unlocked(nbd);
+ if (!config) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
nbd_put(nbd);
@@ -2216,7 +2250,6 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
}
mutex_lock(&nbd->config_lock);
- config = nbd->config;
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
!nbd->pid) {
dev_err(nbd_to_dev(nbd),
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 22a3cf7f32e2..3021d58ca51c 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1464,19 +1464,13 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
return BLK_STS_OK;
}
-static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
- sector_t nr_sectors, enum req_op op)
+static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
+ sector_t nr_sectors, enum req_op op)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
blk_status_t sts;
- if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
- sts = null_handle_throttled(cmd);
- if (sts != BLK_STS_OK)
- return sts;
- }
-
if (op == REQ_OP_FLUSH) {
cmd->error = errno_to_blk_status(null_handle_flush(nullb));
goto out;
@@ -1493,7 +1487,6 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
out:
nullb_complete_cmd(cmd);
- return BLK_STS_OK;
}
static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
@@ -1724,8 +1717,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->fake_timeout = should_timeout_request(rq) ||
blk_should_fake_timeout(rq->q);
- blk_mq_start_request(rq);
-
if (should_requeue_request(rq)) {
/*
* Alternate between hitting the core BUSY path, and the
@@ -1738,6 +1729,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
+ if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
+ blk_status_t sts = null_handle_throttled(cmd);
+
+ if (sts != BLK_STS_OK)
+ return sts;
+ }
+
+ blk_mq_start_request(rq);
+
if (is_poll) {
spin_lock(&nq->poll_lock);
list_add_tail(&rq->queuelist, &nq->poll_list);
@@ -1747,7 +1747,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
if (cmd->fake_timeout)
return BLK_STS_OK;
- return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
+ null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
+ return BLK_STS_OK;
}
static void null_queue_rqs(struct request **rqlist)
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index a6dc3997bf5c..442a0ebeb953 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -1093,9 +1093,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_PIN_ID_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(msg);
return -EMSGSIZE;
-
+ }
pin = dpll_pin_find_from_nlattr(info);
if (!IS_ERR(pin)) {
ret = dpll_msg_add_pin_handle(msg, pin);
@@ -1123,8 +1124,10 @@ int dpll_nl_pin_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_PIN_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(msg);
return -EMSGSIZE;
+ }
ret = dpll_cmd_pin_get_one(msg, pin, info->extack);
if (ret) {
nlmsg_free(msg);
@@ -1256,8 +1259,10 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_DEVICE_ID_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(msg);
return -EMSGSIZE;
+ }
dpll = dpll_device_find_from_nlattr(info);
if (!IS_ERR(dpll)) {
@@ -1284,8 +1289,10 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
hdr = genlmsg_put_reply(msg, info, &dpll_nl_family, 0,
DPLL_CMD_DEVICE_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(msg);
return -EMSGSIZE;
+ }
ret = dpll_device_get_one(dpll, msg, info->extack);
if (ret) {
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 3ca45975c686..d9e9829b2200 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -345,6 +345,8 @@ static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
{ "AONE" },
{ "GANSS" },
{ "Hailuck" },
+ { "Jamesdonkey" },
+ { "A3R" },
};
static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index fd61dba88233..78cdfb8b9a7a 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -381,7 +381,7 @@ static int asus_raw_event(struct hid_device *hdev,
return 0;
}
-static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size)
+static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size)
{
unsigned char *dmabuf;
int ret;
@@ -404,7 +404,7 @@ static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size
static int asus_kbd_init(struct hid_device *hdev)
{
- u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
+ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54,
0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
int ret;
@@ -418,7 +418,7 @@ static int asus_kbd_init(struct hid_device *hdev)
static int asus_kbd_get_functions(struct hid_device *hdev,
unsigned char *kbd_func)
{
- u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
+ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 };
u8 *readbuf;
int ret;
@@ -449,7 +449,7 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
static int rog_nkey_led_init(struct hid_device *hdev)
{
- u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
+ const u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 };
u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 0x41, 0x53, 0x55, 0x53, 0x20,
0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 };
u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1,
@@ -1000,6 +1000,24 @@ static int asus_start_multitouch(struct hid_device *hdev)
return 0;
}
+static int __maybe_unused asus_resume(struct hid_device *hdev) {
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+ int ret = 0;
+
+ if (drvdata->kbd_backlight) {
+ const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4,
+ drvdata->kbd_backlight->cdev.brightness };
+ ret = asus_kbd_set_report(hdev, buf, sizeof(buf));
+ if (ret < 0) {
+ hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret);
+ goto asus_resume_err;
+ }
+ }
+
+asus_resume_err:
+ return ret;
+}
+
static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
@@ -1294,6 +1312,7 @@ static struct hid_driver asus_driver = {
.input_configured = asus_input_configured,
#ifdef CONFIG_PM
.reset_resume = asus_reset_resume,
+ .resume = asus_resume,
#endif
.event = asus_event,
.raw_event = asus_raw_event
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8992e3c1e769..e0181218ad85 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -702,15 +702,22 @@ static void hid_close_report(struct hid_device *device)
* Free a device structure, all reports, and all fields.
*/
-static void hid_device_release(struct device *dev)
+void hiddev_free(struct kref *ref)
{
- struct hid_device *hid = to_hid_device(dev);
+ struct hid_device *hid = container_of(ref, struct hid_device, ref);
hid_close_report(hid);
kfree(hid->dev_rdesc);
kfree(hid);
}
+static void hid_device_release(struct device *dev)
+{
+ struct hid_device *hid = to_hid_device(dev);
+
+ kref_put(&hid->ref, hiddev_free);
+}
+
/*
* Fetch a report description item from the data stream. We support long
* items, though they are not used yet.
@@ -2846,6 +2853,7 @@ struct hid_device *hid_allocate_device(void)
spin_lock_init(&hdev->debug_list_lock);
sema_init(&hdev->driver_input_lock, 1);
mutex_init(&hdev->ll_open_lock);
+ kref_init(&hdev->ref);
hid_bpf_device_init(hdev);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index e7ef1ea107c9..7dd83ec74f8a 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1135,6 +1135,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
goto out;
}
list->hdev = (struct hid_device *) inode->i_private;
+ kref_get(&list->hdev->ref);
file->private_data = list;
mutex_init(&list->read_mutex);
@@ -1227,6 +1228,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
list_del(&list->node);
spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
kfifo_free(&list->hid_debug_fifo);
+
+ kref_put(&list->hdev->ref, hiddev_free);
kfree(list);
return 0;
diff --git a/drivers/hid/hid-glorious.c b/drivers/hid/hid-glorious.c
index 558eb08c19ef..281b3a7187ce 100644
--- a/drivers/hid/hid-glorious.c
+++ b/drivers/hid/hid-glorious.c
@@ -21,6 +21,10 @@ MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice");
* Glorious Model O and O- specify the const flag in the consumer input
* report descriptor, which leads to inputs being ignored. Fix this
* by patching the descriptor.
+ *
+ * Glorious Model I incorrectly specifes the Usage Minimum for its
+ * keyboard HID report, causing keycodes to be misinterpreted.
+ * Fix this by setting Usage Minimum to 0 in that report.
*/
static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
@@ -32,6 +36,10 @@ static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[85] = rdesc[113] = rdesc[141] = \
HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE;
}
+ if (*rsize == 156 && rdesc[41] == 1) {
+ hid_info(hdev, "patching Glorious Model I keyboard report descriptor\n");
+ rdesc[41] = 0;
+ }
return rdesc;
}
@@ -44,6 +52,8 @@ static void glorious_update_name(struct hid_device *hdev)
model = "Model O"; break;
case USB_DEVICE_ID_GLORIOUS_MODEL_D:
model = "Model D"; break;
+ case USB_DEVICE_ID_GLORIOUS_MODEL_I:
+ model = "Model I"; break;
}
snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model);
@@ -66,10 +76,12 @@ static int glorious_probe(struct hid_device *hdev,
}
static const struct hid_device_id glorious_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
USB_DEVICE_ID_GLORIOUS_MODEL_O) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS,
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH,
USB_DEVICE_ID_GLORIOUS_MODEL_D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LAVIEW,
+ USB_DEVICE_ID_GLORIOUS_MODEL_I) },
{ }
};
MODULE_DEVICE_TABLE(hid, glorious_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f7973ccd84a2..c6e4e0d1f214 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -511,10 +511,6 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
-#define USB_VENDOR_ID_GLORIOUS 0x258a
-#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
-#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
-
#define I2C_VENDOR_ID_GOODIX 0x27c6
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
@@ -745,6 +741,9 @@
#define USB_VENDOR_ID_LABTEC 0x1020
#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
+#define USB_VENDOR_ID_LAVIEW 0x22D4
+#define USB_DEVICE_ID_GLORIOUS_MODEL_I 0x1503
+
#define USB_VENDOR_ID_LCPOWER 0x1241
#define USB_DEVICE_ID_LCPOWER_LC1000 0xf767
@@ -869,7 +868,6 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
-#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc547
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
@@ -1160,6 +1158,10 @@
#define USB_VENDOR_ID_SIGMATEL 0x066F
#define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780
+#define USB_VENDOR_ID_SINOWEALTH 0x258a
+#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033
+#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036
+
#define USB_VENDOR_ID_SIS_TOUCH 0x0457
#define USB_DEVICE_ID_SIS9200_TOUCH 0x9200
#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 8afe3be683ba..e6a8b6d8eab7 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1695,12 +1695,11 @@ static int logi_dj_raw_event(struct hid_device *hdev,
}
/*
* Mouse-only receivers send unnumbered mouse data. The 27 MHz
- * receiver uses 6 byte packets, the nano receiver 8 bytes,
- * the lightspeed receiver (Pro X Superlight) 13 bytes.
+ * receiver uses 6 byte packets, the nano receiver 8 bytes.
*/
if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
- size <= 13){
- u8 mouse_report[14];
+ size <= 8) {
+ u8 mouse_report[9];
/* Prepend report id */
mouse_report[0] = REPORT_TYPE_MOUSE;
@@ -1984,10 +1983,6 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
.driver_data = recvr_type_gaming_hidpp},
- { /* Logitech lightspeed receiver (0xc547) */
- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
- .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index 72883e0ce757..aef0785c91cc 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -1142,6 +1142,8 @@ static int mcp2221_probe(struct hid_device *hdev,
if (ret)
return ret;
+ hid_device_io_start(hdev);
+
/* Set I2C bus clock diviser */
if (i2c_clk_freq > 400)
i2c_clk_freq = 400;
@@ -1157,12 +1159,12 @@ static int mcp2221_probe(struct hid_device *hdev,
snprintf(mcp->adapter.name, sizeof(mcp->adapter.name),
"MCP2221 usb-i2c bridge");
+ i2c_set_adapdata(&mcp->adapter, mcp);
ret = devm_i2c_add_adapter(&hdev->dev, &mcp->adapter);
if (ret) {
hid_err(hdev, "can't add usb-i2c adapter: %d\n", ret);
return ret;
}
- i2c_set_adapdata(&mcp->adapter, mcp);
#if IS_REACHABLE(CONFIG_GPIOLIB)
/* Setup GPIO chip */
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e098cc7b3944..fd5b0637dad6 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2046,6 +2046,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+ /* HONOR GLO-GXXX panel */
+ { .driver_data = MT_CLS_VTL,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ 0x347d, 0x7853) },
+
/* Ilitek dual touch panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 5a48fcaa32f0..ea472923fab0 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 05be59ae21b2..6ae2329052c9 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -265,6 +265,7 @@ struct bcache_device {
#define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4
int nr_stripes;
+#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT)
unsigned int stripe_size;
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ae5cbb55861f..de3019972b35 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1000,6 +1000,9 @@ err:
*
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
+ *
+ * Note: Only error code or btree pointer will be returned, it is unncessary
+ * for callers to check NULL pointer.
*/
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
struct bkey *k, int level, bool write,
@@ -1111,6 +1114,10 @@ retry:
mutex_unlock(&b->c->bucket_lock);
}
+/*
+ * Only error code or btree pointer will be returned, it is unncessary for
+ * callers to check NULL pointer.
+ */
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
int level, bool wait,
struct btree *parent)
@@ -1368,7 +1375,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
memset(new_nodes, 0, sizeof(new_nodes));
closure_init_stack(&cl);
- while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
+ while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
keys += r[nodes++].keys;
blocks = btree_default_blocks(b->c) * 2 / 3;
@@ -1532,6 +1539,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return 0;
n = btree_node_alloc_replacement(replace, NULL);
+ if (IS_ERR(n))
+ return 0;
/* recheck reserve after allocating replacement node */
if (btree_check_reserve(b, NULL)) {
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8bd899766372..bfe1685dbae5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -905,6 +905,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
if (!d->stripe_size)
d->stripe_size = 1 << 31;
+ else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
+ d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);
n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
if (!n || n > max_stripes) {
@@ -2016,7 +2018,7 @@ static int run_cache_set(struct cache_set *c)
c->root = bch_btree_node_get(c, NULL, k,
j->btree_level,
true, NULL);
- if (IS_ERR_OR_NULL(c->root))
+ if (IS_ERR(c->root))
goto err;
list_del_init(&c->root->list);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 45d8af755de6..a438efb66069 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1104,7 +1104,7 @@ SHOW(__bch_cache)
sum += INITIAL_PRIO - cached[i];
if (n)
- do_div(sum, n);
+ sum = div64_u64(sum, n);
for (i = 0; i < ARRAY_SIZE(q); i++)
q[i] = INITIAL_PRIO - cached[n * (i + 1) /
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 24c049067f61..3accfdaee6b1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -913,7 +913,7 @@ static int bch_dirty_init_thread(void *arg)
int cur_idx, prev_idx, skip_nr;
k = p = NULL;
- cur_idx = prev_idx = 0;
+ prev_idx = 0;
bch_btree_iter_init(&c->root->keys, &iter, NULL);
k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
@@ -977,24 +977,35 @@ static int bch_btre_dirty_init_thread_nr(void)
void bch_sectors_dirty_init(struct bcache_device *d)
{
int i;
+ struct btree *b = NULL;
struct bkey *k = NULL;
struct btree_iter iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
struct bch_dirty_init_state state;
+retry_lock:
+ b = c->root;
+ rw_lock(0, b, b->level);
+ if (b != c->root) {
+ rw_unlock(0, b);
+ goto retry_lock;
+ }
+
/* Just count root keys if no leaf node */
- rw_lock(0, c->root, c->root->level);
if (c->root->level == 0) {
bch_btree_op_init(&op.op, -1);
op.inode = d->id;
op.count = 0;
for_each_key_filter(&c->root->keys,
- k, &iter, bch_ptr_invalid)
+ k, &iter, bch_ptr_invalid) {
+ if (KEY_INODE(k) != op.inode)
+ continue;
sectors_dirty_init_fn(&op.op, c->root, k);
+ }
- rw_unlock(0, c->root);
+ rw_unlock(0, b);
return;
}
@@ -1014,23 +1025,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
if (atomic_read(&state.enough))
break;
+ atomic_inc(&state.started);
state.infos[i].state = &state;
state.infos[i].thread =
kthread_run(bch_dirty_init_thread, &state.infos[i],
"bch_dirtcnt[%d]", i);
if (IS_ERR(state.infos[i].thread)) {
pr_err("fails to run thread bch_dirty_init[%d]\n", i);
+ atomic_dec(&state.started);
for (--i; i >= 0; i--)
kthread_stop(state.infos[i].thread);
goto out;
}
- atomic_inc(&state.started);
}
out:
/* Must wait for all threads to stop. */
wait_event(state.wait, atomic_read(&state.started) == 0);
- rw_unlock(0, c->root);
+ rw_unlock(0, b);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4ee4593c874a..c94373d64f2c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8666,7 +8666,8 @@ static void md_end_clone_io(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
- orig_bio->bi_status = bio->bi_status;
+ if (bio->bi_status && !orig_bio->bi_status)
+ orig_bio->bi_status = bio->bi_status;
if (md_io_clone->start_time)
bio_end_io_acct(orig_bio, md_io_clone->start_time);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 614c0278419b..6b73648b3779 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
static void xgbe_service_timer(struct timer_list *t)
{
struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+ struct xgbe_channel *channel;
+ unsigned int i;
queue_work(pdata->dev_workqueue, &pdata->service_work);
mod_timer(&pdata->service_timer, jiffies + HZ);
+
+ if (!pdata->tx_usecs)
+ return;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (!channel->tx_ring || channel->tx_timer_active)
+ break;
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
}
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 6e83ff59172a..32fab5e77246 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
cmd->base.phy_address = pdata->phy.address;
- cmd->base.autoneg = pdata->phy.autoneg;
- cmd->base.speed = pdata->phy.speed;
- cmd->base.duplex = pdata->phy.duplex;
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = pdata->phy.speed;
+ cmd->base.duplex = pdata->phy.duplex;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ cmd->base.autoneg = pdata->phy.autoneg;
cmd->base.port = PORT_NONE;
XGBE_LM_COPY(cmd, supported, lks, supported);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 32d2c6fac652..4a2dc705b528 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1193,7 +1193,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
if (pdata->phy.duplex != DUPLEX_FULL)
return -EINVAL;
- xgbe_set_mode(pdata, mode);
+ /* Force the mode change for SFI in Fixed PHY config.
+ * Fixed PHY configs needs PLL to be enabled while doing mode set.
+ * When the SFP module isn't connected during boot, driver assumes
+ * AN is ON and attempts autonegotiation. However, if the connected
+ * SFP comes up in Fixed PHY config, the link will not come up as
+ * PLL isn't enabled while the initial mode set command is issued.
+ * So, force the mode change for SFI in Fixed PHY configuration to
+ * fix link issues.
+ */
+ if (mode == XGBE_MODE_SFI)
+ xgbe_change_mode(pdata, mode);
+ else
+ xgbe_set_mode(pdata, mode);
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 08d7edccfb8d..3f99eb198245 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3844,7 +3844,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
int aq_ret = 0;
- int i, ret;
+ int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = -EINVAL;
@@ -3868,8 +3868,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
- if (!cfilter)
- return -ENOMEM;
+ if (!cfilter) {
+ aq_ret = -ENOMEM;
+ goto err_out;
+ }
/* parse destination mac address */
for (i = 0; i < ETH_ALEN; i++)
@@ -3917,13 +3919,13 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
/* Adding cloud filter programmed as TC filter */
if (tcf.dst_port)
- ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
else
- ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
- if (ret) {
+ aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ if (aq_ret) {
dev_err(&pf->pdev->dev,
"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
- vf->vf_id, ERR_PTR(ret),
+ vf->vf_id, ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err_free;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 6607fa6fe556..fb9c93f37e84 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7401,15 +7401,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- /* configure PTP timestamping after VSI rebuild */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) {
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
- ice_ptp_cfg_timestamp(pf, false);
- else if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL)
- /* for E82x PHC owner always need to have interrupts */
- ice_ptp_cfg_timestamp(pf, true);
- }
-
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
@@ -7461,6 +7452,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
+
+ /* Restore timestamp mode settings after VSI rebuild */
+ ice_ptp_restore_timestamp_mode(pf);
return;
err_vsi_rebuild:
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 1eddcbe89b0c..71f405f8a6fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -256,48 +256,42 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
}
/**
- * ice_ptp_configure_tx_tstamp - Enable or disable Tx timestamp interrupt
- * @pf: The PF pointer to search in
- * @on: bool value for whether timestamp interrupt is enabled or disabled
+ * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
+ * @pf: Board private structure
+ *
+ * Program the device to respond appropriately to the Tx timestamp interrupt
+ * cause.
*/
-static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on)
+static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
+ struct ice_hw *hw = &pf->hw;
+ bool enable;
u32 val;
+ switch (pf->ptp.tx_interrupt_mode) {
+ case ICE_PTP_TX_INTERRUPT_ALL:
+ /* React to interrupts across all quads. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
+ enable = true;
+ break;
+ case ICE_PTP_TX_INTERRUPT_NONE:
+ /* Do not react to interrupts on any quad. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
+ enable = false;
+ break;
+ case ICE_PTP_TX_INTERRUPT_SELF:
+ default:
+ enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
+ break;
+ }
+
/* Configure the Tx timestamp interrupt */
- val = rd32(&pf->hw, PFINT_OICR_ENA);
- if (on)
+ val = rd32(hw, PFINT_OICR_ENA);
+ if (enable)
val |= PFINT_OICR_TSYN_TX_M;
else
val &= ~PFINT_OICR_TSYN_TX_M;
- wr32(&pf->hw, PFINT_OICR_ENA, val);
-}
-
-/**
- * ice_set_tx_tstamp - Enable or disable Tx timestamping
- * @pf: The PF pointer to search in
- * @on: bool value for whether timestamps are enabled or disabled
- */
-static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
-{
- struct ice_vsi *vsi;
- u16 i;
-
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return;
-
- /* Set the timestamp enable flag for all the Tx rings */
- ice_for_each_txq(vsi, i) {
- if (!vsi->tx_rings[i])
- continue;
- vsi->tx_rings[i]->ptp_tx = on;
- }
-
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
- ice_ptp_configure_tx_tstamp(pf, on);
-
- pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ wr32(hw, PFINT_OICR_ENA, val);
}
/**
@@ -311,7 +305,7 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
u16 i;
vsi = ice_get_main_vsi(pf);
- if (!vsi)
+ if (!vsi || !vsi->rx_rings)
return;
/* Set the timestamp flag for all the Rx rings */
@@ -320,23 +314,50 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
continue;
vsi->rx_rings[i]->ptp_rx = on;
}
+}
+
+/**
+ * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
+ * @pf: Board private structure
+ *
+ * Called during preparation for reset to temporarily disable timestamping on
+ * the device. Called during remove to disable timestamping while cleaning up
+ * driver resources.
+ */
+static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ val = rd32(hw, PFINT_OICR_ENA);
+ val &= ~PFINT_OICR_TSYN_TX_M;
+ wr32(hw, PFINT_OICR_ENA, val);
- pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
- HWTSTAMP_FILTER_NONE;
+ ice_set_rx_tstamp(pf, false);
}
/**
- * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
+ * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
* @pf: Board private structure
- * @ena: bool value to enable or disable time stamp
*
- * This function will configure timestamping during PTP initialization
- * and deinitialization
+ * Called at the end of rebuild to restore timestamp configuration after
+ * a device reset.
*/
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
- ice_set_tx_tstamp(pf, ena);
- ice_set_rx_tstamp(pf, ena);
+ struct ice_hw *hw = &pf->hw;
+ bool enable_rx;
+
+ ice_ptp_cfg_tx_interrupt(pf);
+
+ enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
+ ice_set_rx_tstamp(pf, enable_rx);
+
+ /* Trigger an immediate software interrupt to ensure that timestamps
+ * which occurred during reset are handled now.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
}
/**
@@ -2037,10 +2058,10 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- ice_set_tx_tstamp(pf, false);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
break;
case HWTSTAMP_TX_ON:
- ice_set_tx_tstamp(pf, true);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
break;
default:
return -ERANGE;
@@ -2048,7 +2069,7 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- ice_set_rx_tstamp(pf, false);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
@@ -2064,12 +2085,15 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- ice_set_rx_tstamp(pf, true);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
+ /* Immediately update the device timestamping mode */
+ ice_ptp_restore_timestamp_mode(pf);
+
return 0;
}
@@ -2737,7 +2761,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf)
clear_bit(ICE_FLAG_PTP, pf->flags);
/* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_disable_timestamp_mode(pf);
kthread_cancel_delayed_work_sync(&ptp->work);
@@ -2803,15 +2827,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) {
- /* The clock owner for this device type handles the timestamp
- * interrupt for all ports.
- */
- ice_ptp_configure_tx_tstamp(pf, true);
-
- /* React on all quads interrupts for E82x */
- wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
-
+ if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
err = ice_ptp_tx_ena_intr(pf, true, itr);
if (err)
@@ -2881,13 +2897,6 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
case ICE_PHY_E822:
- /* Non-owner PFs don't react to any interrupts on E82x,
- * neither on own quad nor on others
- */
- if (!ice_ptp_pf_handles_tx_interrupt(pf)) {
- ice_ptp_configure_tx_tstamp(pf, false);
- wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
- }
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
@@ -3032,6 +3041,9 @@ void ice_ptp_init(struct ice_pf *pf)
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
+ /* Configure initial Tx interrupt settings */
+ ice_ptp_cfg_tx_interrupt(pf);
+
set_bit(ICE_FLAG_PTP, pf->flags);
err = ice_ptp_init_work(pf, ptp);
if (err)
@@ -3067,7 +3079,7 @@ void ice_ptp_release(struct ice_pf *pf)
return;
/* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_disable_timestamp_mode(pf);
ice_ptp_remove_auxbus_device(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 8f6f94392756..06a330867fc9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -292,7 +292,7 @@ int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
@@ -317,8 +317,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
return -EOPNOTSUPP;
}
-static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
-
+static inline void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { }
static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
static inline s8
ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 52d0a126eb61..9e97ea863068 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2306,9 +2306,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
return;
- if (!tx_ring->ptp_tx)
- return;
-
/* Tx timestamps cannot be sampled when doing TSO */
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 166413fc33f4..daf7b9dbb143 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -380,7 +380,6 @@ struct ice_tx_ring {
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 4762dbea64a1..97a71e9b8563 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -1088,6 +1088,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
struct ethhdr *eth_hdr;
bool new = false;
int err = 0;
+ u64 vf_num;
u32 ring;
if (!flow_cfg->max_flows) {
@@ -1100,7 +1101,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
return -ENOMEM;
- if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ /* Number of queues on a VF can be greater or less than
+ * the PF's queue. Hence no need to check for the
+ * queue count. Hence no need to check queue count if PF
+ * is installing for its VF. Below is the expected vf_num value
+ * based on the ethtool commands.
+ *
+ * e.g.
+ * 1. ethtool -U <netdev> ... action -1 ==> vf_num:255
+ * 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0
+ * 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num> ==>
+ * vf_num:vf_idx+1
+ */
+ vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+ if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
+ ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
if (fsp->location >= otx2_get_maxflows(flow_cfg))
@@ -1182,6 +1197,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
flow_cfg->nr_flows++;
}
+ if (flow->is_vf)
+ netdev_info(pfvf->netdev,
+ "Make sure that VF's queue number is within its queue limit\n");
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 91b99fd70361..ba95ac913274 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1934,6 +1934,8 @@ int otx2_stop(struct net_device *netdev)
/* Clear RSS enable flag */
rss = &pf->hw.rss_info;
rss->enable = false;
+ if (!netif_is_rxfh_configured(netdev))
+ kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
/* Cleanup Queue IRQ */
vec = pci_irq_vector(pf->pdev,
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index b9bb1d2f0237..295366a85c63 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2599,9 +2599,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode &= ~AcceptMulticast;
} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
dev->flags & IFF_ALLMULTI ||
- tp->mac_version == RTL_GIGA_MAC_VER_35 ||
- tp->mac_version == RTL_GIGA_MAC_VER_46 ||
- tp->mac_version == RTL_GIGA_MAC_VER_48) {
+ tp->mac_version == RTL_GIGA_MAC_VER_35) {
/* accept all multicasts */
} else if (netdev_mc_empty(dev)) {
rx_mode &= ~AcceptMulticast;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index a2b9e289aa36..85dcda51df05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -280,7 +280,7 @@ config DWMAC_INTEL
config DWMAC_LOONGSON
tristate "Loongson PCI DWMAC support"
default MACH_LOONGSON64
- depends on STMMAC_ETH && PCI
+ depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI
depends on COMMON_CLK
help
This selects the LOONGSON PCI bus support for the stmmac driver,
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index a3c5de9d547a..533e912af089 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1769,10 +1769,12 @@ int wx_sw_init(struct wx *wx)
wx->subsystem_device_id = pdev->subsystem_device;
} else {
err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
- if (!err)
- wx->subsystem_device_id = swab16((u16)ssid);
+ if (err < 0) {
+ wx_err(wx, "read of internal subsystem device id failed\n");
+ return err;
+ }
- return err;
+ wx->subsystem_device_id = swab16((u16)ssid);
}
wx->mac_table = kcalloc(wx->mac.num_rar_entries,
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 3d43f808c86b..8db804543e66 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -121,10 +121,8 @@ static int ngbe_sw_init(struct wx *wx)
/* PCI config space info */
err = wx_sw_init(wx);
- if (err < 0) {
- wx_err(wx, "read of internal subsystem device id failed\n");
+ if (err < 0)
return err;
- }
/* mac type, phy type , oem type */
ngbe_init_type_code(wx);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 70f0b5c01dac..526250102db2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -364,10 +364,8 @@ static int txgbe_sw_init(struct wx *wx)
/* PCI config space info */
err = wx_sw_init(wx);
- if (err < 0) {
- wx_err(wx, "read of internal subsystem device id failed\n");
+ if (err < 0)
return err;
- }
txgbe_init_type_code(wx);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 82d0d44b2b02..bf6e33990490 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -822,7 +822,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
/* Tx Full Checksum Offload Enabled */
cur_p->app0 |= 2;
- } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
+ } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
csum_start_off = skb_transport_offset(skb);
csum_index_off = csum_start_off + skb->csum_offset;
/* Tx Partial Checksum Offload Enabled */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3ba3c8fb28a5..706ea5263e87 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2206,9 +2206,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto upper_link_failed;
}
- /* set slave flag before open to prevent IPv6 addrconf */
- vf_netdev->flags |= IFF_SLAVE;
-
schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
@@ -2315,16 +2312,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
}
- /* Fallback path to check synthetic vf with
- * help of mac addr
+ /* Fallback path to check synthetic vf with help of mac addr.
+ * Because this function can be called before vf_netdev is
+ * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied
+ * from dev_addr, also try to match to its dev_addr.
+ * Note: On Hyper-V and Azure, it's not possible to set a MAC address
+ * on a VF that matches to the MAC of a unrelated NETVSC device.
*/
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
- if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
- netdev_notice(vf_netdev,
- "falling back to mac addr based matching\n");
+ if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
+ ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
return ndev;
- }
}
netdev_notice(vf_netdev,
@@ -2332,6 +2331,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
return NULL;
}
+static int netvsc_prepare_bonding(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+
+ ndev = get_netvsc_byslot(vf_netdev);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ /* set slave flag before open to prevent IPv6 addrconf */
+ vf_netdev->flags |= IFF_SLAVE;
+ return NOTIFY_DONE;
+}
+
static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
@@ -2531,15 +2543,6 @@ static int netvsc_probe(struct hv_device *dev,
goto devinfo_failed;
}
- nvdev = rndis_filter_device_add(dev, device_info);
- if (IS_ERR(nvdev)) {
- ret = PTR_ERR(nvdev);
- netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- goto rndis_failed;
- }
-
- eth_hw_addr_set(net, device_info->mac_adr);
-
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
* all subchannels to show up, but that may not happen because
@@ -2547,9 +2550,23 @@ static int netvsc_probe(struct hv_device *dev,
* -> ... -> device_add() -> ... -> __device_attach() can't get
* the device lock, so all the subchannels can't be processed --
* finally netvsc_subchan_work() hangs forever.
+ *
+ * The rtnl lock also needs to be held before rndis_filter_device_add()
+ * which advertises nvsp_2_vsc_capability / sriov bit, and triggers
+ * VF NIC offering and registering. If VF NIC finished register_netdev()
+ * earlier it may cause name based config failure.
*/
rtnl_lock();
+ nvdev = rndis_filter_device_add(dev, device_info);
+ if (IS_ERR(nvdev)) {
+ ret = PTR_ERR(nvdev);
+ netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+ goto rndis_failed;
+ }
+
+ eth_hw_addr_set(net, device_info->mac_adr);
+
if (nvdev->num_chn > 1)
schedule_work(&nvdev->subchan_work);
@@ -2586,9 +2603,9 @@ static int netvsc_probe(struct hv_device *dev,
return 0;
register_failed:
- rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
+ rtnl_unlock();
netvsc_devinfo_put(device_info);
devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
@@ -2753,6 +2770,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
switch (event) {
+ case NETDEV_POST_INIT:
+ return netvsc_prepare_bonding(event_dev);
case NETDEV_REGISTER:
return netvsc_register_vf(event_dev);
case NETDEV_UNREGISTER:
@@ -2788,12 +2807,17 @@ static int __init netvsc_drv_init(void)
}
netvsc_ring_bytes = ring_size * PAGE_SIZE;
+ register_netdevice_notifier(&netvsc_netdev_notifier);
+
ret = vmbus_driver_register(&netvsc_drv);
if (ret)
- return ret;
+ goto err_vmbus_reg;
- register_netdevice_notifier(&netvsc_netdev_notifier);
return 0;
+
+err_vmbus_reg:
+ unregister_netdevice_notifier(&netvsc_netdev_notifier);
+ return ret;
}
MODULE_LICENSE("GPL");
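The netvsc changes above hinge on two orderings: the netdevice notifier must be registered before the vmbus driver so no NETDEV_POST_INIT event from a VF NIC is missed, and rndis_filter_device_add() has to run under rtnl_lock. A minimal sketch of the notifier-before-bus-registration pattern follows; the example_* names (including example_bus_register()) are hypothetical stand-ins, not the netvsc code itself.

#include <linux/module.h>
#include <linux/netdevice.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* NETDEV_POST_INIT fires after allocation but before
	 * register_netdev() completes, so flags set here (e.g. IFF_SLAVE)
	 * are in place before the device can be opened. A real driver
	 * would first check that @dev is one of its VF companions.
	 */
	if (event == NETDEV_POST_INIT)
		dev->flags |= IFF_SLAVE;

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/* hypothetical stand-in for the bus registration, e.g. vmbus_driver_register() */
static int example_bus_register(void)
{
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* register the notifier first so no early VF event is lost */
	register_netdevice_notifier(&example_nb);

	ret = example_bus_register();
	if (ret)
		goto err_bus_reg;

	return 0;

err_bus_reg:
	unregister_netdevice_notifier(&example_nb);
	return ret;
}
module_init(example_init);

MODULE_LICENSE("GPL");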
diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c
index d7b81a36d673..145eb0bd096d 100644
--- a/drivers/net/ipa/reg/gsi_reg-v5.0.c
+++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c
@@ -78,7 +78,7 @@ REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x0001c000 + 0x12000 * GSI_EE_AP, 0x80);
static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
- [R_LENGTH] = GENMASK(19, 0),
+ [R_LENGTH] = GENMASK(23, 0),
};
REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 5a0f86f38f09..97bd6705c241 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -7,6 +7,7 @@
#include <linux/filter.h>
#include <linux/netfilter_netdev.h>
#include <linux/bpf_mprog.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/netkit.h>
#include <net/dst.h>
@@ -68,6 +69,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
const struct bpf_mprog_entry *entry;
struct net_device *peer;
+ int len = skb->len;
rcu_read_lock();
peer = rcu_dereference(nk->peer);
@@ -85,15 +87,22 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
case NETKIT_PASS:
skb->protocol = eth_type_trans(skb, skb->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
- __netif_rx(skb);
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
+ dev_sw_netstats_tx_add(dev, 1, len);
+ dev_sw_netstats_rx_add(peer, len);
+ } else {
+ goto drop_stats;
+ }
break;
case NETKIT_REDIRECT:
+ dev_sw_netstats_tx_add(dev, 1, len);
skb_do_redirect(skb);
break;
case NETKIT_DROP:
default:
drop:
kfree_skb(skb);
+drop_stats:
dev_core_stats_tx_dropped_inc(dev);
ret_dev = NET_XMIT_DROP;
break;
@@ -169,11 +178,18 @@ out:
rcu_read_unlock();
}
-static struct net_device *netkit_peer_dev(struct net_device *dev)
+INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev)
{
return rcu_dereference(netkit_priv(dev)->peer);
}
+static void netkit_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ dev_fetch_sw_netstats(stats, dev->tstats);
+ stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
+}
+
static void netkit_uninit(struct net_device *dev);
static const struct net_device_ops netkit_netdev_ops = {
@@ -184,6 +200,7 @@ static const struct net_device_ops netkit_netdev_ops = {
.ndo_set_rx_headroom = netkit_set_headroom,
.ndo_get_iflink = netkit_get_iflink,
.ndo_get_peer_dev = netkit_peer_dev,
+ .ndo_get_stats64 = netkit_get_stats,
.ndo_uninit = netkit_uninit,
.ndo_features_check = passthru_features_check,
};
@@ -218,6 +235,7 @@ static void netkit_setup(struct net_device *dev)
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->flags |= IFF_NOARP;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
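The netkit hunks above (and the veth conversion further down) switch to core-allocated per-CPU tstats: the driver opts in via dev->pcpu_stat_type, bumps the counters on the datapath, and folds them in .ndo_get_stats64. A rough sketch of that pattern, assuming a trivial hypothetical example_* driver rather than netkit itself:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;

	/* deliver locally for brevity; a real driver would hand the skb to
	 * its peer device first (cf. netkit_xmit()/veth_xmit() above)
	 */
	if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
		dev_sw_netstats_tx_add(dev, 1, len);	/* per-CPU, lockless */
		return NET_XMIT_SUCCESS;
	}

	dev_core_stats_tx_dropped_inc(dev);		/* atomic core counter */
	return NET_XMIT_DROP;
}

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	/* fold the core-allocated dev->tstats into rtnl_link_stats64 */
	dev_fetch_sw_netstats(stats, dev->tstats);
	stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit	 = example_xmit,
	.ndo_get_stats64 = example_get_stats64,
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* ask the net core to allocate and free dev->tstats for us */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->netdev_ops = &example_netdev_ops;
}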
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index a017e9de2119..7b8afa589a53 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1079,17 +1079,17 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 pkt_count = 0;
u64 desc_hdr = 0;
u16 vlan_tag = 0;
- u32 skb_len = 0;
+ u32 skb_len;
if (!skb)
goto err;
- if (skb->len == 0)
+ skb_len = skb->len;
+ if (skb_len < sizeof(desc_hdr))
goto err;
- skb_len = skb->len;
/* RX Descriptor Header */
- skb_trim(skb, skb->len - sizeof(desc_hdr));
+ skb_trim(skb, skb_len - sizeof(desc_hdr));
desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb));
/* Check these packets */
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index aff39bf3161d..4ea0e155bb0d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1583,11 +1583,11 @@ static int ax88179_reset(struct usbnet *dev)
*tmp16 = AX_PHYPWR_RSTCTL_IPRL;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
- msleep(200);
+ msleep(500);
*tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
- msleep(100);
+ msleep(200);
/* Ethernet PHY Auto Detach*/
ax88179_auto_detach(dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 344af3c5c836..e2e181378f41 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1289,6 +1289,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x0168, 4)},
{QMI_FIXED_INTF(0x19d2, 0x0176, 3)},
{QMI_FIXED_INTF(0x19d2, 0x0178, 3)},
+ {QMI_FIXED_INTF(0x19d2, 0x0189, 4)}, /* ZTE MF290 */
{QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */
{QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */
{QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 9980517ed8b0..57efb3454c57 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -236,8 +236,8 @@ static void veth_get_ethtool_stats(struct net_device *dev,
data[tx_idx + j] += *(u64 *)(base + offset);
}
} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
- pp_idx = tx_idx + VETH_TQ_STATS_LEN;
}
+ pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;
page_pool_stats:
veth_get_page_pool_stats(dev, &data[pp_idx]);
@@ -373,7 +373,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
if (!use_napi)
- dev_lstats_add(dev, length);
+ dev_sw_netstats_tx_add(dev, 1, length);
else
__veth_xdp_flush(rq);
} else {
@@ -387,14 +387,6 @@ drop:
return ret;
}
-static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
-{
- struct veth_priv *priv = netdev_priv(dev);
-
- dev_lstats_read(dev, packets, bytes);
- return atomic64_read(&priv->dropped);
-}
-
static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
@@ -432,24 +424,24 @@ static void veth_get_stats64(struct net_device *dev,
struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer;
struct veth_stats rx;
- u64 packets, bytes;
- tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
- tot->tx_bytes = bytes;
- tot->tx_packets = packets;
+ tot->tx_dropped = atomic64_read(&priv->dropped);
+ dev_fetch_sw_netstats(tot, dev->tstats);
veth_stats_rx(&rx, dev);
tot->tx_dropped += rx.xdp_tx_err;
tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
- tot->rx_bytes = rx.xdp_bytes;
- tot->rx_packets = rx.xdp_packets;
+ tot->rx_bytes += rx.xdp_bytes;
+ tot->rx_packets += rx.xdp_packets;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (peer) {
- veth_stats_tx(peer, &packets, &bytes);
- tot->rx_bytes += bytes;
- tot->rx_packets += packets;
+ struct rtnl_link_stats64 tot_peer = {};
+
+ dev_fetch_sw_netstats(&tot_peer, peer->tstats);
+ tot->rx_bytes += tot_peer.tx_bytes;
+ tot->rx_packets += tot_peer.tx_packets;
veth_stats_rx(&rx, peer);
tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
@@ -1506,25 +1498,12 @@ static void veth_free_queues(struct net_device *dev)
static int veth_dev_init(struct net_device *dev)
{
- int err;
-
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
-
- err = veth_alloc_queues(dev);
- if (err) {
- free_percpu(dev->lstats);
- return err;
- }
-
- return 0;
+ return veth_alloc_queues(dev);
}
static void veth_dev_free(struct net_device *dev)
{
veth_free_queues(dev);
- free_percpu(dev->lstats);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1796,6 +1775,7 @@ static void veth_setup(struct net_device *dev)
NETIF_F_HW_VLAN_STAG_RX);
dev->needs_free_netdev = true;
dev->priv_destructor = veth_dev_free;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->max_mtu = ETH_MAX_MTU;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index db766941b78f..bb95ce43cd97 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -121,22 +121,12 @@ struct net_vrf {
int ifindex;
};
-struct pcpu_dstats {
- u64 tx_pkts;
- u64 tx_bytes;
- u64 tx_drps;
- u64 rx_pkts;
- u64 rx_bytes;
- u64 rx_drps;
- struct u64_stats_sync syncp;
-};
-
static void vrf_rx_stats(struct net_device *dev, int len)
{
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
- dstats->rx_pkts++;
+ dstats->rx_packets++;
dstats->rx_bytes += len;
u64_stats_update_end(&dstats->syncp);
}
@@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev,
do {
start = u64_stats_fetch_begin(&dstats->syncp);
tbytes = dstats->tx_bytes;
- tpkts = dstats->tx_pkts;
- tdrops = dstats->tx_drps;
+ tpkts = dstats->tx_packets;
+ tdrops = dstats->tx_drops;
rbytes = dstats->rx_bytes;
- rpkts = dstats->rx_pkts;
+ rpkts = dstats->rx_packets;
} while (u64_stats_fetch_retry(&dstats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpkts;
@@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
vrf_rx_stats(dev, len);
else
- this_cpu_inc(dev->dstats->rx_drps);
+ this_cpu_inc(dev->dstats->rx_drops);
return NETDEV_TX_OK;
}
@@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
- dstats->tx_pkts++;
+ dstats->tx_packets++;
dstats->tx_bytes += len;
u64_stats_update_end(&dstats->syncp);
} else {
- this_cpu_inc(dev->dstats->tx_drps);
+ this_cpu_inc(dev->dstats->tx_drops);
}
return ret;
@@ -1174,22 +1164,15 @@ static void vrf_dev_uninit(struct net_device *dev)
vrf_rtable_release(dev, vrf);
vrf_rt6_release(dev, vrf);
-
- free_percpu(dev->dstats);
- dev->dstats = NULL;
}
static int vrf_dev_init(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
- if (!dev->dstats)
- goto out_nomem;
-
/* create the default dst which points back to us */
if (vrf_rtable_create(dev) != 0)
- goto out_stats;
+ goto out_nomem;
if (vrf_rt6_create(dev) != 0)
goto out_rth;
@@ -1203,9 +1186,6 @@ static int vrf_dev_init(struct net_device *dev)
out_rth:
vrf_rtable_release(dev, vrf);
-out_stats:
- free_percpu(dev->dstats);
- dev->dstats = NULL;
out_nomem:
return -ENOMEM;
}
@@ -1704,6 +1684,8 @@ static void vrf_setup(struct net_device *dev)
dev->min_mtu = IPV6_MIN_MTU;
dev->max_mtu = IP6_MAX_MTU;
dev->mtu = dev->max_mtu;
+
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
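Along the same lines, the vrf conversion drops the driver-private pcpu_dstats definition and its manual allocation in favour of the core-provided variant selected via NETDEV_PCPU_STAT_DSTATS. A minimal sketch of the update side, again with hypothetical example_* names:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

static void example_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_packets++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void example_setup(struct net_device *dev)
{
	/* the core allocates and frees dev->dstats; no
	 * netdev_alloc_pcpu_stats()/free_percpu() in ndo_init/ndo_uninit
	 */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}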
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 258dcc103921..deb9636b0ecf 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -210,7 +210,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
*/
while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
- ++dev->stats.tx_dropped;
+ DEV_STATS_INC(dev, tx_dropped);
}
skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
@@ -228,7 +228,7 @@ err_icmp:
else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
- ++dev->stats.tx_errors;
+ DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
return ret;
}
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 0b3f0c843550..a176653c8861 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -416,20 +416,20 @@ dishonest_packet_peer:
net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
dev->name, skb, peer->internal_id,
&peer->endpoint.addr);
- ++dev->stats.rx_errors;
- ++dev->stats.rx_frame_errors;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_frame_errors);
goto packet_processed;
dishonest_packet_type:
net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
dev->name, peer->internal_id, &peer->endpoint.addr);
- ++dev->stats.rx_errors;
- ++dev->stats.rx_frame_errors;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_frame_errors);
goto packet_processed;
dishonest_packet_size:
net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
dev->name, peer->internal_id, &peer->endpoint.addr);
- ++dev->stats.rx_errors;
- ++dev->stats.rx_length_errors;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_length_errors);
goto packet_processed;
packet_processed:
dev_kfree_skb(skb);
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 95c853b59e1d..0d48e0f4a1ba 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -333,7 +333,8 @@ err:
void wg_packet_purge_staged_packets(struct wg_peer *peer)
{
spin_lock_bh(&peer->staged_packet_queue.lock);
- peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
+ DEV_STATS_ADD(peer->device->dev, tx_dropped,
+ peer->staged_packet_queue.qlen);
__skb_queue_purge(&peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
}
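The wireguard hunks replace plain ++dev->stats increments with the DEV_STATS_* helpers, which update the shared net_device counters atomically and are therefore safe from concurrent contexts. A tiny illustration of the same idea, using hypothetical example_* helpers:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* hypothetical drop path: count one dropped tx packet atomically */
static void example_drop_tx(struct net_device *dev, struct sk_buff *skb)
{
	DEV_STATS_INC(dev, tx_dropped);
	kfree_skb(skb);
}

/* hypothetical purge path: account a whole queue of drops in one shot;
 * the caller holds the queue lock, as in wg_packet_purge_staged_packets()
 */
static void example_purge(struct net_device *dev, struct sk_buff_head *queue)
{
	DEV_STATS_ADD(dev, tx_dropped, skb_queue_len(queue));
	__skb_queue_purge(queue);
}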
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index b027be0b0b6f..590b038e449e 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -26,10 +26,14 @@ struct virtual_nci_dev {
struct mutex mtx;
struct sk_buff *send_buff;
struct wait_queue_head wq;
+ bool running;
};
static int virtual_nci_open(struct nci_dev *ndev)
{
+ struct virtual_nci_dev *vdev = nci_get_drvdata(ndev);
+
+ vdev->running = true;
return 0;
}
@@ -40,6 +44,7 @@ static int virtual_nci_close(struct nci_dev *ndev)
mutex_lock(&vdev->mtx);
kfree_skb(vdev->send_buff);
vdev->send_buff = NULL;
+ vdev->running = false;
mutex_unlock(&vdev->mtx);
return 0;
@@ -50,7 +55,7 @@ static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
struct virtual_nci_dev *vdev = nci_get_drvdata(ndev);
mutex_lock(&vdev->mtx);
- if (vdev->send_buff) {
+ if (vdev->send_buff || !vdev->running) {
mutex_unlock(&vdev->mtx);
kfree_skb(skb);
return -1;
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 48328e36e93b..72c0525c75f5 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -757,12 +757,11 @@ static void nvme_queue_auth_work(struct work_struct *work)
__func__, chap->qid);
mutex_lock(&ctrl->dhchap_auth_mutex);
ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
if (ret) {
- mutex_unlock(&ctrl->dhchap_auth_mutex);
chap->error = ret;
goto fail2;
}
- mutex_unlock(&ctrl->dhchap_auth_mutex);
/* DH-HMAC-CHAP Step 3: send reply */
dev_dbg(ctrl->device, "%s: qid %d send reply\n",
@@ -839,6 +838,8 @@ static void nvme_queue_auth_work(struct work_struct *work)
}
fail2:
+ if (chap->status == 0)
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
__func__, chap->qid, chap->status);
tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 88b54cdcbd68..46a4c9c5ea96 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -482,7 +482,6 @@ EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
- nvme_stop_keep_alive(ctrl);
if (ctrl->admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->admin_tagset,
nvme_cancel_request, ctrl);
@@ -1814,16 +1813,18 @@ set_pi:
return ret;
}
-static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
- if (nvme_init_ms(ns, id))
- return;
+ ret = nvme_init_ms(ns, id);
+ if (ret)
+ return ret;
ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- return;
+ return 0;
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
@@ -1832,7 +1833,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* remap the separate metadata buffer from the block layer.
*/
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
- return;
+ return 0;
ns->features |= NVME_NS_EXT_LBAS;
@@ -1859,6 +1860,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
else
ns->features |= NVME_NS_METADATA_SUPPORTED;
}
+ return 0;
}
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
@@ -2032,7 +2034,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
- nvme_configure_metadata(ns, id);
+ ret = nvme_configure_metadata(ns, id);
+ if (ret < 0) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
nvme_set_chunk_sectors(ns, id);
nvme_update_disk_info(ns->disk, ns, id);
@@ -4348,6 +4354,7 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
nvme_auth_stop(ctrl);
+ nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fw_act_work);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 4673ead69c5f..aa88606a44c4 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -667,8 +667,10 @@ static const match_table_t opt_tokens = {
#endif
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
+#ifdef CONFIG_NVME_HOST_AUTH
{ NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
{ NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
+#endif
#ifdef CONFIG_NVME_TCP_TLS
{ NVMF_OPT_TLS, "tls" },
#endif
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 49c3e46eaa1e..9f9a3b35dc64 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2530,12 +2530,6 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
* clean up the admin queue. Same thing as above.
*/
nvme_quiesce_admin_queue(&ctrl->ctrl);
-
- /*
- * Open-coding nvme_cancel_admin_tagset() as fc
- * is not using nvme_cancel_request().
- */
- nvme_stop_keep_alive(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -3138,11 +3132,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
nvme_unquiesce_admin_queue(&ctrl->ctrl);
ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
- if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
- ret = -EIO;
if (ret)
goto out_disconnect_admin_queue;
-
+ if (test_bit(ASSOC_FAILED, &ctrl->flags)) {
+ ret = -EIO;
+ goto out_stop_keep_alive;
+ }
/* sanity checks */
/* FC-NVME does not have other data in the capsule */
@@ -3150,7 +3145,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
ctrl->ctrl.icdoff);
ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_disconnect_admin_queue;
+ goto out_stop_keep_alive;
}
/* FC-NVME supports normal SGL Data Block Descriptors */
@@ -3158,7 +3153,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n");
ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_disconnect_admin_queue;
+ goto out_stop_keep_alive;
}
if (opts->queue_size > ctrl->ctrl.maxcmd) {
@@ -3205,6 +3200,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
out_term_aen_ops:
nvme_fc_term_aen_ops(ctrl);
+out_stop_keep_alive:
+ nvme_stop_keep_alive(&ctrl->ctrl);
out_disconnect_admin_queue:
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: create_assoc failed, assoc_id %llx ret %d\n",
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a7fea4cbacd7..6d178d555920 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1080,6 +1080,7 @@ destroy_io:
nvme_rdma_free_io_queues(ctrl);
}
destroy_admin:
+ nvme_stop_keep_alive(&ctrl->ctrl);
nvme_quiesce_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 89661a9cf850..d79811cfa0ce 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -36,11 +36,11 @@ static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
-#ifdef CONFIG_NVME_TCP_TLS
/*
* TLS handshake timeout
*/
static int tls_handshake_timeout = 10;
+#ifdef CONFIG_NVME_TCP_TLS
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
@@ -161,10 +161,8 @@ struct nvme_tcp_queue {
struct ahash_request *snd_hash;
__le32 exp_ddgst;
__le32 recv_ddgst;
-#ifdef CONFIG_NVME_TCP_TLS
struct completion tls_complete;
int tls_err;
-#endif
struct page_frag_cache pf_cache;
void (*state_change)(struct sock *);
@@ -207,6 +205,14 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
+static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl)
+{
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
+ return 0;
+
+ return ctrl->opts->tls;
+}
+
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
u32 queue_idx = nvme_tcp_queue_id(queue);
@@ -1412,7 +1418,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
memset(&msg, 0, sizeof(msg));
iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp);
- if (queue->ctrl->ctrl.opts->tls) {
+ if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
@@ -1424,7 +1430,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
ret = -ENOTCONN;
- if (queue->ctrl->ctrl.opts->tls) {
+ if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
ctype = tls_get_record_type(queue->sock->sk,
(struct cmsghdr *)cbuf);
if (ctype != TLS_RECORD_TYPE_DATA) {
@@ -1548,7 +1554,6 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}
-#ifdef CONFIG_NVME_TCP_TLS
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
{
struct nvme_tcp_queue *queue = data;
@@ -1625,14 +1630,6 @@ static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
}
return ret;
}
-#else
-static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
- struct nvme_tcp_queue *queue,
- key_serial_t pskid)
-{
- return -EPROTONOSUPPORT;
-}
-#endif
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
key_serial_t pskid)
@@ -1759,7 +1756,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
}
/* If PSKs are configured try to start TLS */
- if (pskid) {
+ if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) {
ret = nvme_tcp_start_tls(nctrl, queue, pskid);
if (ret)
goto err_init_connect;
@@ -1916,7 +1913,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
int ret;
key_serial_t pskid = 0;
- if (ctrl->opts->tls) {
+ if (nvme_tcp_tls(ctrl)) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
else
@@ -1949,7 +1946,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
- if (ctrl->opts->tls && !ctrl->tls_key) {
+ if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) {
dev_err(ctrl->device, "no PSK negotiated\n");
return -ENOKEY;
}
@@ -2237,6 +2234,7 @@ destroy_io:
nvme_tcp_destroy_io_queues(ctrl, new);
}
destroy_admin:
+ nvme_stop_keep_alive(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
return ret;
}
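The host/tcp.c rework trades scattered #ifdef CONFIG_NVME_TCP_TLS blocks for a single helper that compiles to a constant false when the option is off, so the compiler discards the TLS paths while still type-checking them. The shape of that pattern, sketched with a hypothetical CONFIG_EXAMPLE_TLS option and example_* names:

#include <linux/kconfig.h>
#include <linux/types.h>

struct example_opts {
	bool tls;
};

/* Compile-time gate: when CONFIG_EXAMPLE_TLS is not set this returns a
 * constant false, so callers' TLS branches are eliminated but remain
 * visible to the compiler.
 */
static inline bool example_tls_enabled(const struct example_opts *opts)
{
	if (!IS_ENABLED(CONFIG_EXAMPLE_TLS))
		return false;

	return opts->tls;
}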
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 31633da9427c..e1ebc73f3e5e 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -4,6 +4,8 @@ config NVME_TARGET
tristate "NVMe Target support"
depends on BLOCK
depends on CONFIGFS_FS
+ select NVME_KEYRING if NVME_TARGET_TCP_TLS
+ select KEYS if NVME_TARGET_TCP_TLS
select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
select SGL_ALLOC
help
@@ -87,9 +89,7 @@ config NVME_TARGET_TCP
config NVME_TARGET_TCP_TLS
bool "NVMe over Fabrics TCP target TLS encryption support"
depends on NVME_TARGET_TCP
- select NVME_KEYRING
select NET_HANDSHAKE
- select KEYS
help
Enables TLS encryption for the NVMe TCP target using the netlink handshake API.
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 9eed6e6765ea..e307a044b1a1 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1893,7 +1893,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
return ERR_PTR(-ENOMEM);
}
- if (nvme_keyring_id()) {
+ if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) {
port->keyring = key_lookup(nvme_keyring_id());
if (IS_ERR(port->keyring)) {
pr_warn("NVMe keyring not available, disabling TLS\n");
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 43b5bd8bb6a5..d8da840a1c0e 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -244,6 +244,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
le32_to_cpu(c->kato), &ctrl);
if (status)
@@ -313,6 +315,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out;
}
+ d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+ d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
le16_to_cpu(d->cntlid), req);
if (!ctrl) {
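The fabrics-cmd.c hunks force a terminating NUL into the fixed-size NQN fields received from the wire before they are used as strings. The same defensive pattern, sketched with hypothetical names and a field length that merely mirrors NVMF_NQN_FIELD_LEN:

#include <linux/string.h>

#define EXAMPLE_FIELD_LEN 256	/* hypothetical, mirrors NVMF_NQN_FIELD_LEN */

struct example_connect_data {
	char subsysnqn[EXAMPLE_FIELD_LEN];
	char hostnqn[EXAMPLE_FIELD_LEN];
};

/* Untrusted wire data may not be NUL-terminated; cap both fields before
 * any strcmp()/strlen() style use.
 */
static void example_sanitize(struct example_connect_data *d)
{
	d->subsysnqn[EXAMPLE_FIELD_LEN - 1] = '\0';
	d->hostnqn[EXAMPLE_FIELD_LEN - 1] = '\0';
}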
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 92b74d0b8686..4cc27856aa8f 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1854,6 +1854,8 @@ static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
}
return ret;
}
+#else
+static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
#endif
static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1911,9 +1913,9 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
mutex_unlock(&nvmet_tcp_queue_mutex);
-#ifdef CONFIG_NVME_TARGET_TCP_TLS
INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
nvmet_tcp_tls_handshake_timeout);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
struct sock *sk = queue->sock->sk;
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index cd6ac04c1468..c3104714b480 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -964,33 +964,6 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ }
};
-static int amd_pmc_get_dram_size(struct amd_pmc_dev *dev)
-{
- int ret;
-
- switch (dev->cpu_id) {
- case AMD_CPU_ID_YC:
- if (!(dev->major > 90 || (dev->major == 90 && dev->minor > 39))) {
- ret = -EINVAL;
- goto err_dram_size;
- }
- break;
- default:
- ret = -EINVAL;
- goto err_dram_size;
- }
-
- ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
- if (ret || !dev->dram_size)
- goto err_dram_size;
-
- return 0;
-
-err_dram_size:
- dev_err(dev->dev, "DRAM size command not supported for this platform\n");
- return ret;
-}
-
static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
{
u32 phys_addr_low, phys_addr_hi;
@@ -1009,8 +982,8 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
return -EIO;
/* Get DRAM size */
- ret = amd_pmc_get_dram_size(dev);
- if (ret)
+ ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
+ if (ret || !dev->dram_size)
dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;
/* Get STB DRAM address */
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 5798b49ddaba..8c9f4f3227fc 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -588,17 +588,14 @@ static void release_attributes_data(void)
static int hp_add_other_attributes(int attr_type)
{
struct kobject *attr_name_kobj;
- union acpi_object *obj = NULL;
int ret;
char *attr_name;
- mutex_lock(&bioscfg_drv.mutex);
-
attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
- if (!attr_name_kobj) {
- ret = -ENOMEM;
- goto err_other_attr_init;
- }
+ if (!attr_name_kobj)
+ return -ENOMEM;
+
+ mutex_lock(&bioscfg_drv.mutex);
/* Check if attribute type is supported */
switch (attr_type) {
@@ -615,14 +612,14 @@ static int hp_add_other_attributes(int attr_type)
default:
pr_err("Error: Unknown attr_type: %d\n", attr_type);
ret = -EINVAL;
- goto err_other_attr_init;
+ kfree(attr_name_kobj);
+ goto unlock_drv_mutex;
}
ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype,
NULL, "%s", attr_name);
if (ret) {
pr_err("Error encountered [%d]\n", ret);
- kobject_put(attr_name_kobj);
goto err_other_attr_init;
}
@@ -630,27 +627,26 @@ static int hp_add_other_attributes(int attr_type)
switch (attr_type) {
case HPWMI_SECURE_PLATFORM_TYPE:
ret = hp_populate_secure_platform_data(attr_name_kobj);
- if (ret)
- goto err_other_attr_init;
break;
case HPWMI_SURE_START_TYPE:
ret = hp_populate_sure_start_data(attr_name_kobj);
- if (ret)
- goto err_other_attr_init;
break;
default:
ret = -EINVAL;
- goto err_other_attr_init;
}
+ if (ret)
+ goto err_other_attr_init;
+
mutex_unlock(&bioscfg_drv.mutex);
return 0;
err_other_attr_init:
+ kobject_put(attr_name_kobj);
+unlock_drv_mutex:
mutex_unlock(&bioscfg_drv.mutex);
- kfree(obj);
return ret;
}
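The hp-bioscfg rework tightens the error unwinding: memory allocated before the mutex is taken can still be kfree()d directly, but once kobject_init_and_add() has been called the reference must be dropped with kobject_put() so the ktype release runs. A condensed sketch of that unwind order, with hypothetical example_* names and assuming the ktype's .release frees the kobject:

#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_mutex);
static struct kobj_type example_ktype;	/* assumed: .release frees the kobject */

static int example_add_attr(struct kobject *parent, const char *name)
{
	struct kobject *kobj;
	int ret;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (!kobj)
		return -ENOMEM;		/* nothing else to undo yet */

	mutex_lock(&example_mutex);

	ret = kobject_init_and_add(kobj, &example_ktype, parent, "%s", name);
	if (ret)
		goto err_put;		/* kobject_put(), never kfree(), from here on */

	mutex_unlock(&example_mutex);
	return 0;

err_put:
	kobject_put(kobj);
	mutex_unlock(&example_mutex);
	return ret;
}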
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ac037540acfc..88eefccb6ed2 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1425,18 +1425,17 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv)
if (WARN_ON(priv->kbd_bl.initialized))
return -EEXIST;
- brightness = ideapad_kbd_bl_brightness_get(priv);
- if (brightness < 0)
- return brightness;
-
- priv->kbd_bl.last_brightness = brightness;
-
if (ideapad_kbd_bl_check_tristate(priv->kbd_bl.type)) {
priv->kbd_bl.led.max_brightness = 2;
} else {
priv->kbd_bl.led.max_brightness = 1;
}
+ brightness = ideapad_kbd_bl_brightness_get(priv);
+ if (brightness < 0)
+ return brightness;
+
+ priv->kbd_bl.last_brightness = brightness;
priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c
index fdf55b5d6948..e4be40f73eeb 100644
--- a/drivers/platform/x86/intel/telemetry/core.c
+++ b/drivers/platform/x86/intel/telemetry/core.c
@@ -102,7 +102,7 @@ static const struct telemetry_core_ops telm_defpltops = {
/**
* telemetry_update_events() - Update telemetry Configuration
* @pss_evtconfig: PSS related config. No change if num_evts = 0.
- * @pss_evtconfig: IOSS related config. No change if num_evts = 0.
+ * @ioss_evtconfig: IOSS related config. No change if num_evts = 0.
*
* This API updates the IOSS & PSS Telemetry configuration. Old config
* is overwritten. Call telemetry_reset_events when logging is over
@@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(telemetry_reset_events);
/**
* telemetry_get_eventconfig() - Returns the pss and ioss events enabled
* @pss_evtconfig: Pointer to PSS related configuration.
- * @pss_evtconfig: Pointer to IOSS related configuration.
+ * @ioss_evtconfig: Pointer to IOSS related configuration.
* @pss_len: Number of u32 elements allocated for pss_evtconfig array
* @ioss_len: Number of u32 elements allocated for ioss_evtconfig array
*
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d440319a7945..833cfab7d877 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -676,18 +676,20 @@ static void dasd_profile_start(struct dasd_block *block,
* we count each request only once.
*/
device = cqr->startdev;
- if (device->profile.data) {
- counter = 1; /* request is not yet queued on the start device */
- list_for_each(l, &device->ccw_queue)
- if (++counter >= 31)
- break;
- }
+ if (!device->profile.data)
+ return;
+
+ spin_lock(get_ccwdev_lock(device->cdev));
+ counter = 1; /* request is not yet queued on the start device */
+ list_for_each(l, &device->ccw_queue)
+ if (++counter >= 31)
+ break;
+ spin_unlock(get_ccwdev_lock(device->cdev));
+
spin_lock(&device->profile.lock);
- if (device->profile.data) {
- device->profile.data->dasd_io_nr_req[counter]++;
- if (rq_data_dir(req) == READ)
- device->profile.data->dasd_read_nr_req[counter]++;
- }
+ device->profile.data->dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ device->profile.data->dasd_read_nr_req[counter]++;
spin_unlock(&device->profile.lock);
}
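The dasd_profile_start() change walks device->ccw_queue only while holding the ccw device lock that protects that list, and only afterwards takes profile.lock for the counter update. A stripped-down sketch of that two-lock pattern, using generic spinlocks and a hypothetical example_device instead of the s390 helpers:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_device {
	spinlock_t queue_lock;		/* protects ccw_queue */
	struct list_head ccw_queue;
	spinlock_t profile_lock;	/* protects nr_req[] */
	unsigned int nr_req[32];
};

static void example_profile_start(struct example_device *dev)
{
	struct list_head *l;
	int counter = 1;		/* the new request is not queued yet */

	spin_lock(&dev->queue_lock);
	list_for_each(l, &dev->ccw_queue)
		if (++counter >= 31)
			break;
	spin_unlock(&dev->queue_lock);

	spin_lock(&dev->profile_lock);
	dev->nr_req[counter]++;
	spin_unlock(&dev->profile_lock);
}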
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 2e663131adaf..1b1b8a41c4d4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -283,7 +283,7 @@ struct dasd_pprc_dev_info {
__u8 secondary; /* 7 Secondary device address */
__u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */
__u8 reserved2[12]; /* 10-21 reserved */
- __u16 prim_cu_ssid; /* 22-23 Pimary Control Unit SSID */
+ __u16 prim_cu_ssid; /* 22-23 Primary Control Unit SSID */
__u8 reserved3[12]; /* 24-35 reserved */
__u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */
__u8 reserved4[90]; /* 38-127 reserved */
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 4902d45e929c..c61e6427384c 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -103,10 +103,11 @@ config CCWGROUP
config ISM
tristate "Support for ISM vPCI Adapter"
depends on PCI
+ imply SMC
default n
help
Select this option if you want to use the Internal Shared Memory
- vPCI Adapter.
+ vPCI Adapter. The adapter can be used with the SMC network protocol.
To compile as a module choose M. The module name is ism.
If unsure, choose N.
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 6df7f377d2f9..81aabbfbbe2c 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -30,7 +30,6 @@ static const struct pci_device_id ism_device_table[] = {
MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
-static const struct smcd_ops ism_ops;
#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
@@ -289,22 +288,6 @@ out:
return ret;
}
-static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
- u32 vid)
-{
- union ism_query_rgid cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_QUERY_RGID;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.vlan_valid = vid_valid;
- cmd.request.vlan_id = vid;
-
- return ism_cmd(ism, &cmd);
-}
-
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
@@ -429,23 +412,6 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
-static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info)
-{
- union ism_sig_ieq cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.trigger_irq = trigger_irq;
- cmd.request.event_code = event_code;
- cmd.request.info = info;
-
- return ism_cmd(ism, &cmd);
-}
-
static unsigned int max_bytes(unsigned int start, unsigned int len,
unsigned int boundary)
{
@@ -503,14 +469,6 @@ u8 *ism_get_seid(void)
}
EXPORT_SYMBOL_GPL(ism_get_seid);
-static u16 ism_get_chid(struct ism_dev *ism)
-{
- if (!ism || !ism->pdev)
- return 0;
-
- return to_zpci(ism->pdev)->pchid;
-}
-
static void ism_handle_event(struct ism_dev *ism)
{
struct ism_event *entry;
@@ -569,11 +527,6 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static u64 ism_get_local_gid(struct ism_dev *ism)
-{
- return ism->local_gid;
-}
-
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
@@ -774,6 +727,22 @@ module_exit(ism_exit);
/*************************** SMC-D Implementation *****************************/
#if IS_ENABLED(CONFIG_SMC)
+static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
+ u32 vid)
+{
+ union ism_query_rgid cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_QUERY_RGID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.rgid = rgid;
+ cmd.request.vlan_valid = vid_valid;
+ cmd.request.vlan_id = vid;
+
+ return ism_cmd(ism, &cmd);
+}
+
static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
u32 vid)
{
@@ -811,6 +780,23 @@ static int smcd_reset_vlan_required(struct smcd_dev *smcd)
return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}
+static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info)
+{
+ union ism_sig_ieq cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ cmd.request.rgid = rgid;
+ cmd.request.trigger_irq = trigger_irq;
+ cmd.request.event_code = event_code;
+ cmd.request.info = info;
+
+ return ism_cmd(ism, &cmd);
+}
+
static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
u32 event_code, u64 info)
{
@@ -830,11 +816,24 @@ static int smcd_supports_v2(void)
SYSTEM_EID.type[0] != '0';
}
+static u64 ism_get_local_gid(struct ism_dev *ism)
+{
+ return ism->local_gid;
+}
+
static u64 smcd_get_local_gid(struct smcd_dev *smcd)
{
return ism_get_local_gid(smcd->priv);
}
+static u16 ism_get_chid(struct ism_dev *ism)
+{
+ if (!ism || !ism->pdev)
+ return 0;
+
+ return to_zpci(ism->pdev)->pchid;
+}
+
static u16 smcd_get_chid(struct smcd_dev *smcd)
{
return ism_get_chid(smcd->priv);