author     David S. Miller <davem@davemloft.net>    2019-07-11 15:27:20 -0700
committer  David S. Miller <davem@davemloft.net>    2019-07-11 15:27:20 -0700
commit     3194d6adfe8e4da74e04b8eacef2bd7eea3f8992 (patch)
tree       403ece564fe999cf36574ae77dfcc1e18fa56513 /net
parent     114a5c3240155fdb01bf821c9d326d7bb05bd464 (diff)
parent     dc205a8d34228809dedab94a85a866cbb255248f (diff)
Merge branch 'net/rds-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux
Santosh Shilimkar says:
====================
rds fixes
A few RDS fixes which make the RDS RDMA transport work reliably on mainline.
The first two fixes are applicable to v4.11+ stable versions, and the last
three patches apply only to v5.1 stable and current mainline.
The patchset is rebased against 'net' and is also available on the tree below.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--   net/rds/connection.c      |  1
-rw-r--r--   net/rds/ib.h              |  4
-rw-r--r--   net/rds/ib_cm.c           |  9
-rw-r--r--   net/rds/ib_frmr.c         | 11
-rw-r--r--   net/rds/ib_send.c         | 29
-rw-r--r--   net/rds/rdma.c            | 10
-rw-r--r--   net/rds/rdma_transport.c  | 11
-rw-r--r--   net/rds/rds.h             |  1
-rw-r--r--   net/rds/send.c            |  4
9 files changed, 30 insertions, 50 deletions
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 7ea134f9a825..ed7f2133acc2 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -736,6 +736,7 @@ static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
 	cinfo->next_rx_seq = cp->cp_next_rx_seq;
 	cinfo->laddr = conn->c_laddr.s6_addr32[3];
 	cinfo->faddr = conn->c_faddr.s6_addr32[3];
+	cinfo->tos = conn->c_tos;
 	strncpy(cinfo->transport, conn->c_trans->t_name,
 		sizeof(cinfo->transport));
 	cinfo->flags = 0;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 67a715b076ca..66c03c7665b2 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -15,8 +15,7 @@
 
 #define RDS_IB_DEFAULT_RECV_WR		1024
 #define RDS_IB_DEFAULT_SEND_WR		256
-#define RDS_IB_DEFAULT_FR_WR		256
-#define RDS_IB_DEFAULT_FR_INV_WR	256
+#define RDS_IB_DEFAULT_FR_WR		512
 
 #define RDS_IB_DEFAULT_RETRY_COUNT	1
 
@@ -157,7 +156,6 @@ struct rds_ib_connection {
 
 	/* To control the number of wrs from fastreg */
 	atomic_t		i_fastreg_wrs;
-	atomic_t		i_fastunreg_wrs;
 
 	/* interrupt handling */
 	struct tasklet_struct	i_send_tasklet;
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 66c6eb56072b..8891822eba4f 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -460,10 +460,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	 * completion queue and send queue. This extra space is used for FRMR
 	 * registration and invalidation work requests
 	 */
-	fr_queue_space = rds_ibdev->use_fastreg ?
-			 (RDS_IB_DEFAULT_FR_WR + 1) +
-			 (RDS_IB_DEFAULT_FR_INV_WR + 1)
-			 : 0;
+	fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);
 
 	/* add the conn now so that connection establishment has the dev */
 	rds_ib_add_conn(rds_ibdev, conn);
@@ -530,7 +527,6 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
 	attr.send_cq = ic->i_send_cq;
 	attr.recv_cq = ic->i_recv_cq;
 	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
-	atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);
 
 	/*
 	 * XXX this can fail if max_*_wr is too large?  Are we supposed
@@ -1009,8 +1005,7 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
 		wait_event(rds_ib_ring_empty_wait,
 			   rds_ib_ring_empty(&ic->i_recv_ring) &&
 			   (atomic_read(&ic->i_signaled_sends) == 0) &&
-			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
-			   (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
+			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
 		tasklet_kill(&ic->i_send_tasklet);
 		tasklet_kill(&ic->i_recv_tasklet);
 
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 688dcd68d4ea..32ae26ed58a0 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -239,8 +239,8 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
 	if (frmr->fr_state != FRMR_IS_INUSE)
 		goto out;
 
-	while (atomic_dec_return(&ibmr->ic->i_fastunreg_wrs) <= 0) {
-		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
+	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
+		atomic_inc(&ibmr->ic->i_fastreg_wrs);
 		cpu_relax();
 	}
 
@@ -257,7 +257,7 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
 	if (unlikely(ret)) {
 		frmr->fr_state = FRMR_IS_STALE;
 		frmr->fr_inv = false;
-		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
+		atomic_inc(&ibmr->ic->i_fastreg_wrs);
 		pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
 		goto out;
 	}
@@ -285,10 +285,9 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
 	if (frmr->fr_inv) {
 		frmr->fr_state = FRMR_IS_FREE;
 		frmr->fr_inv = false;
-		atomic_inc(&ic->i_fastreg_wrs);
-	} else {
-		atomic_inc(&ic->i_fastunreg_wrs);
 	}
+
+	atomic_inc(&ic->i_fastreg_wrs);
 }
 
 void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 18f2341202f8..dfe6237dafe2 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -69,6 +69,16 @@ static void rds_ib_send_complete(struct rds_message *rm,
 	complete(rm, notify_status);
 }
 
+static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+				   struct rm_data_op *op,
+				   int wc_status)
+{
+	if (op->op_nents)
+		ib_dma_unmap_sg(ic->i_cm_id->device,
+				op->op_sg, op->op_nents,
+				DMA_TO_DEVICE);
+}
+
 static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
 				   struct rm_rdma_op *op,
 				   int wc_status)
@@ -129,21 +139,6 @@ static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
 	rds_ib_stats_inc(s_ib_atomic_fadd);
 }
 
-static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
-				   struct rm_data_op *op,
-				   int wc_status)
-{
-	struct rds_message *rm = container_of(op, struct rds_message, data);
-
-	if (op->op_nents)
-		ib_dma_unmap_sg(ic->i_cm_id->device,
-				op->op_sg, op->op_nents,
-				DMA_TO_DEVICE);
-
-	if (rm->rdma.op_active && rm->data.op_notify)
-		rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
-}
-
 /*
  * Unmap the resources associated with a struct send_work.
  *
@@ -902,7 +897,9 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 		send->s_queued = jiffies;
 		send->s_op = NULL;
 
-		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
+		if (!op->op_notify)
+			nr_sig += rds_ib_set_wr_signal_state(ic, send,
+							     op->op_notify);
 
 		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
 		send->s_rdma_wr.remote_addr = remote_addr;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index b340ed4fc43a..916f5ec373d8 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -641,16 +641,6 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		}
 		op->op_notifier->n_user_token = args->user_token;
 		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
-
-		/* Enable rmda notification on data operation for composite
-		 * rds messages and make sure notification is enabled only
-		 * for the data operation which follows it so that application
-		 * gets notified only after full message gets delivered.
-		 */
-		if (rm->data.op_sg) {
-			rm->rdma.op_notify = 0;
-			rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
-		}
 	}
 
 	/* The cookie contains the R_Key of the remote memory region, and
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 46bce8389066..ff74c4bbb9fc 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -112,17 +112,20 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
 		if (!conn)
 			break;
 		err = (int *)rdma_consumer_reject_data(cm_id, event, &len);
-		if (!err || (err && ((*err) == RDS_RDMA_REJ_INCOMPAT))) {
+		if (!err ||
+		    (err && len >= sizeof(*err) &&
+		     ((*err) <= RDS_RDMA_REJ_INCOMPAT))) {
 			pr_warn("RDS/RDMA: conn <%pI6c, %pI6c> rejected, dropping connection\n",
 				&conn->c_laddr, &conn->c_faddr);
-			conn->c_proposed_version = RDS_PROTOCOL_COMPAT_VERSION;
-			conn->c_tos = 0;
+
+			if (!conn->c_tos)
+				conn->c_proposed_version = RDS_PROTOCOL_COMPAT_VERSION;
+
 			rds_conn_drop(conn);
 		}
 		rdsdebug("Connection rejected: %s\n",
 			 rdma_reject_msg(cm_id, event->status));
 		break;
-		/* FALLTHROUGH */
 	case RDMA_CM_EVENT_ADDR_ERROR:
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 0d8f67cadd74..f0066d168499 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -476,7 +476,6 @@ struct rds_message {
 		} rdma;
 		struct rm_data_op {
 			unsigned int		op_active:1;
-			unsigned int		op_notify:1;
 			unsigned int		op_nents;
 			unsigned int		op_count;
 			unsigned int		op_dmasg;
diff --git a/net/rds/send.c b/net/rds/send.c
index 166dd578c1cc..031b1e97a466 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -491,14 +491,12 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
 	struct rm_rdma_op *ro;
 	struct rds_notifier *notifier;
 	unsigned long flags;
-	unsigned int notify = 0;
 
 	spin_lock_irqsave(&rm->m_rs_lock, flags);
 
-	notify = rm->rdma.op_notify | rm->data.op_notify;
 	ro = &rm->rdma;
 	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
-	    ro->op_active && notify && ro->op_notifier) {
+	    ro->op_active && ro->op_notify && ro->op_notifier) {
 		notifier = ro->op_notifier;
 		rs = rm->m_rs;
 		sock_hold(rds_rs_to_sk(rs));
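
A recurring theme above is the consolidation of the two work-request counters
(i_fastreg_wrs and i_fastunreg_wrs) into a single counter that both FRMR
registration and invalidation postings draw from, sized at RDS_IB_DEFAULT_FR_WR
(512); the fr_queue_space calculation in ib_cm.c and the shutdown wait_event()
are adjusted to match. The sketch below is a minimal user-space illustration of
that credit pattern using C11 atomics; the names FR_CREDITS, fr_credit_get()
and fr_credit_put() are hypothetical stand-ins rather than the kernel's API,
and sched_yield() stands in for cpu_relax().

/* fr_credits.c - sketch of a single shared work-request credit counter,
 * loosely mirroring how i_fastreg_wrs is used in net/rds/ib_frmr.c after
 * this series. All names here are illustrative only.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define FR_CREDITS 512	/* mirrors RDS_IB_DEFAULT_FR_WR after the change */

static atomic_int fr_credits = FR_CREDITS;

/* Take one credit before posting a reg or inv work request; spin if full. */
static void fr_credit_get(void)
{
	for (;;) {
		/* atomic_fetch_sub() returns the old value, so subtract one
		 * more to see the post-decrement value, as atomic_dec_return()
		 * does in the kernel.
		 */
		int now = atomic_fetch_sub(&fr_credits, 1) - 1;

		if (now > 0)
			return;
		atomic_fetch_add(&fr_credits, 1);	/* undo and retry */
		sched_yield();				/* stand-in for cpu_relax() */
	}
}

/* Return the credit from the completion handler (or after a failed post). */
static void fr_credit_put(void)
{
	atomic_fetch_add(&fr_credits, 1);
}

int main(void)
{
	fr_credit_get();
	/* ...a real caller would post the FRMR work request here... */
	fr_credit_put();

	printf("credits back to %d\n", atomic_load(&fr_credits));
	return 0;
}

Read this way, registration and invalidation postings compete for the same
send-queue space, which lines up with the smaller fr_queue_space in
rds_ib_setup_qp() and with the shutdown path now waiting for only one counter
to drain back to RDS_IB_DEFAULT_FR_WR.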