author     Håkon Bugge <haakon.bugge@oracle.com>  2021-03-31 20:43:14 +0200
committer  Jason Gunthorpe <jgg@nvidia.com>       2021-04-12 19:51:48 -0300
commit     5aa54bd28ce2b066d82cdd515269b9d562bd6e66 (patch)
tree       027b8877446b4c7af02cc319d231d32a754161ff /net/rds
parent     3aeffc46afde05140551abb49efaa4563adba38c (diff)
rds: ib: Remove two ib_modify_qp() calls
For some HCAs, ib_modify_qp() is an expensive operation when running virtualized.

On both the active and passive side, the QP returned by the CM already has its state set to RTS, so there is no need for the excess RTS -> RTS transition. With IB Core's ability to set the minimum RNR timer, we use this interface to shave off another ib_modify_qp().

Fixes: ec16227e1414 ("RDS/IB: Infiniband transport")
Link: https://lore.kernel.org/r/1617216194-12890-3-git-send-email-haakon.bugge@oracle.com
Signed-off-by: Håkon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
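For reference, a minimal sketch of the pattern the passive side moves to, assuming a hypothetical connect-request handler (my_handle_connect and its conn_param setup are illustrative, not the RDS code): the minimum RNR NAK timer is handed to the RDMA CM with rdma_set_min_rnr_timer() before rdma_accept(), so the CM programs it while it brings the QP up and no separate ib_modify_qp(IB_QP_MIN_RNR_TIMER) call is needed afterwards.

#include <rdma/rdma_cm.h>
#include <rdma/ib_verbs.h>

/* Hypothetical connect-request handler; conn_param setup is omitted. */
static int my_handle_connect(struct rdma_cm_id *cm_id,
                             struct rdma_conn_param *conn_param)
{
        int err;

        /*
         * Ask the CM to program IB_QP_MIN_RNR_TIMER for us when it
         * transitions the QP, instead of issuing a separate
         * ib_modify_qp() once the connection is established.
         */
        err = rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
        if (err)
                return err;

        /* rdma_accept() rejects the connection internally on failure. */
        return rdma_accept(cm_id, conn_param);
}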
Diffstat (limited to 'net/rds')
-rw-r--r--  net/rds/ib_cm.c           35
-rw-r--r--  net/rds/rdma_transport.c   1
2 files changed, 2 insertions, 34 deletions
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f5cbe963cd8f..26b069e1999d 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -68,31 +68,6 @@ static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
 }
 
 /*
- * Tune RNR behavior. Without flow control, we use a rather
- * low timeout, but not the absolute minimum - this should
- * be tunable.
- *
- * We already set the RNR retry count to 7 (which is the
- * smallest infinite number :-) above.
- * If flow control is off, we want to change this back to 0
- * so that we learn quickly when our credit accounting is
- * buggy.
- *
- * Caller passes in a qp_attr pointer - don't waste stack spacv
- * by allocation this twice.
- */
-static void
-rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
-{
-        int ret;
-
-        attr->min_rnr_timer = IB_RNR_TIMER_000_32;
-        ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
-        if (ret)
-                printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
-}
-
-/*
  * Connection established.
  * We get here for both outgoing and incoming connection.
  */
@@ -100,7 +75,6 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 {
         struct rds_ib_connection *ic = conn->c_transport_data;
         const union rds_ib_conn_priv *dp = NULL;
-        struct ib_qp_attr qp_attr;
         __be64 ack_seq = 0;
         __be32 credit = 0;
         u8 major = 0;
@@ -168,14 +142,6 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
          * the posted credit count. */
         rds_ib_recv_refill(conn, 1, GFP_KERNEL);
 
-        /* Tune RNR behavior */
-        rds_ib_tune_rnr(ic, &qp_attr);
-
-        qp_attr.qp_state = IB_QPS_RTS;
-        err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
-        if (err)
-                printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);
-
         /* update ib_device with this local ipaddr */
         err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr);
         if (err)
@@ -947,6 +913,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                                   event->param.conn.responder_resources,
                                   event->param.conn.initiator_depth, isv6);
 
+        rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
         /* rdma_accept() calls rdma_reject() internally if it fails */
         if (rdma_accept(cm_id, &conn_param))
                 rds_ib_conn_error(conn, "rdma_accept failed\n");
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 5f741e51b4ba..a9e4ff948a7d 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -87,6 +87,7 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
 
         case RDMA_CM_EVENT_ADDR_RESOLVED:
                 rdma_set_service_type(cm_id, conn->c_tos);
+                rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
                 /* XXX do we need to clean up if this fails? */
                 ret = rdma_resolve_route(cm_id,
                                          RDS_RDMA_RESOLVE_TIMEOUT_MS);
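On the active side, the equivalent hook is the address-resolved event: the value is recorded once per cm_id and, assuming the CM folds IB_QP_MIN_RNR_TIMER into the QP transitions it already performs (as introduced by the parent commit), no extra ib_modify_qp() is needed later. A rough sketch of such an event-handler fragment follows; the handler name, timeout value, and switch skeleton are illustrative, not the RDS code.

#include <rdma/rdma_cm.h>
#include <rdma/ib_verbs.h>

/* Illustrative active-side RDMA CM event handler fragment. */
static int my_cm_event_handler(struct rdma_cm_id *cm_id,
                               struct rdma_cm_event *event)
{
        int ret = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                /*
                 * Record the desired minimum RNR NAK timer once; the CM
                 * programs it while bringing the QP up, so no separate
                 * ib_modify_qp(IB_QP_MIN_RNR_TIMER) call is needed later.
                 */
                rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32);
                ret = rdma_resolve_route(cm_id, 5000 /* ms, arbitrary */);
                break;
        default:
                break;
        }

        return ret;
}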