author     Chuck Lever <chuck.lever@oracle.com>          2018-12-19 11:00:00 -0500
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>    2019-01-02 12:05:18 -0500
commit     395069fc37e7a76280b176c0327d1ead6ca29838
tree       7d864527f3ef5d38631de205c25b9bef279016e1 /net
parent     ba217ec64aef91f40c3cbdbfb0ab3a4000782504
xprtrdma: Add trace points for calls to transport switch methods
Name them "trace_xprtrdma_op_*" so they can be easily enabled as a
group. No trace point is added where the generic layer already has
observability.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
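
As background for the rename, here is a minimal sketch of how a family of trace points like this is typically wired up with the kernel's DECLARE_EVENT_CLASS/DEFINE_EVENT macros: one shared event class, with each transport switch method getting its own event that differs only in name. The class layout, the fields, and the DEFINE_RXPRT_EVENT() helper are assumptions made for this sketch (the real definitions live in include/trace/events/rpcrdma.h, and the usual TRACE_EVENT header boilerplate is omitted); only the xprtrdma_op_* names come from the patch below.

/*
 * Illustrative sketch only -- not a copy of include/trace/events/rpcrdma.h.
 * One event class is declared once, then reused for every method-level
 * trace point so the events differ only in name.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rpcrdma

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(const struct rpcrdma_xprt *r_xprt),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
	),

	TP_printk("r_xprt=%p", __entry->r_xprt)
);

/* Each transport switch method gets its own event, reusing the class. */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(const struct rpcrdma_xprt *r_xprt), \
				TP_ARGS(r_xprt))

DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_connect);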
Diffstat (limited to 'net')
-rw-r--r--   net/sunrpc/xprtrdma/transport.c   18
1 file changed, 11 insertions, 7 deletions
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 6a57033a5846..d94da3c57593 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -268,7 +268,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
 {
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 
-	trace_xprtrdma_inject_dsc(r_xprt);
+	trace_xprtrdma_op_inject_dsc(r_xprt);
 	rdma_disconnect(r_xprt->rx_ia.ri_id);
 }
 
@@ -284,7 +284,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 {
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 
-	trace_xprtrdma_destroy(r_xprt);
+	trace_xprtrdma_op_destroy(r_xprt);
 
 	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
 
@@ -418,7 +418,7 @@ out3:
 out2:
 	rpcrdma_ia_close(&new_xprt->rx_ia);
 out1:
-	trace_xprtrdma_destroy(new_xprt);
+	trace_xprtrdma_op_destroy(new_xprt);
 	xprt_rdma_free_addresses(xprt);
 	xprt_free(xprt);
 	return ERR_PTR(rc);
@@ -428,7 +428,8 @@ out1:
  * xprt_rdma_close - close a transport connection
  * @xprt: transport context
  *
- * Called during transport shutdown, reconnect, or device removal.
+ * Called during autoclose or device removal.
+ *
  * Caller holds @xprt's send lock to prevent activity on this
  * transport while the connection is torn down.
  */
@@ -440,6 +441,8 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
 
 	might_sleep();
 
+	trace_xprtrdma_op_close(r_xprt);
+
 	/* Prevent marshaling and sending of new requests */
 	xprt_clear_connected(xprt);
 
@@ -525,6 +528,7 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 
+	trace_xprtrdma_op_connect(r_xprt);
 	if (r_xprt->rx_ep.rep_connected != 0) {
 		/* Reconnect */
 		schedule_delayed_work(&r_xprt->rx_connect_worker,
@@ -659,11 +663,11 @@ xprt_rdma_allocate(struct rpc_task *task)
 	rqst->rq_buffer = req->rl_sendbuf->rg_base;
 	rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
 
-	trace_xprtrdma_allocate(task, req);
+	trace_xprtrdma_op_allocate(task, req);
 	return 0;
 
 out_fail:
-	trace_xprtrdma_allocate(task, NULL);
+	trace_xprtrdma_op_allocate(task, NULL);
 	return -ENOMEM;
 }
 
@@ -682,7 +686,7 @@ xprt_rdma_free(struct rpc_task *task)
 
 	if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
 		rpcrdma_release_rqst(r_xprt, req);
-	trace_xprtrdma_rpc_done(task, req);
+	trace_xprtrdma_op_free(task, req);
 }
 
 /**
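
Usage note (a hedged example, not part of the patch): assuming these events are registered under the rpcrdma trace subsystem, the shared prefix lets the whole group be switched on with a single wildcard write to tracefs, for example echo 'rpcrdma:xprtrdma_op_*' > /sys/kernel/tracing/set_event, instead of enabling each call-site event by name.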