author		Chuck Lever <chuck.lever@oracle.com>	2018-07-27 11:18:54 -0400
committer	J. Bruce Fields <bfields@redhat.com>	2018-08-09 16:11:21 -0400
commit		a53d5cb0646a12586ae45c892c7a411d47ee1a1d (patch)
tree		8342ebe172d876e8e0358fc0531cf3f75d3e7e58 /net/sunrpc
parent		7b4d6da4bb6d0ece17ca27f0b4612df87f873d83 (diff)
svcrdma: Avoid releasing a page in svc_xprt_release()
svc_xprt_release() invokes svc_free_res_pages(), which releases pages between rq_respages and rq_next_page.

Historically, the RPC/RDMA transport has set these two pointers to be different by one, which means:

- one page gets released when svc_recv returns 0. This normally happens whenever one or more RDMA Reads need to be dispatched to complete construction of an RPC Call.

- one page gets released after every call to svc_send.

In both cases, this released page is immediately refilled by svc_alloc_arg. There does not seem to be a reason for releasing this page. To avoid this unnecessary memory allocator traffic, set rq_next_page more carefully.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
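The release path referred to above is the loop svc_xprt_release() runs over that page range. A minimal sketch of it (an approximation of svc_free_res_pages(), shown only to illustrate why an equal pointer pair releases nothing; it is not part of this patch):

static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	/* Put every page in the half-open range [rq_respages, rq_next_page). */
	while (rqstp->rq_next_page != rqstp->rq_respages) {
		struct page **pp = --rqstp->rq_next_page;

		if (*pp) {
			put_page(*pp);	/* the freed page is refilled later by svc_alloc_arg() */
			*pp = NULL;
		}
	}
}

With rq_next_page == rq_respages the range is empty, the loop body never runs, and svc_alloc_arg() does not have to allocate a replacement page on the next pass.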
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  |  9
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c    |  4
2 files changed, 9 insertions, 4 deletions
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 841fca143804..ddb7def48b12 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -366,9 +366,6 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
 	arg->page_base = 0;
 	arg->buflen = ctxt->rc_byte_len;
 	arg->len = ctxt->rc_byte_len;
-
-	rqstp->rq_respages = &rqstp->rq_pages[0];
-	rqstp->rq_next_page = rqstp->rq_respages + 1;
 }
 
 /* This accommodates the largest possible Write chunk,
@@ -730,6 +727,12 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 
 	svc_rdma_build_arg_xdr(rqstp, ctxt);
 
+	/* Prevent svc_xprt_release from releasing pages in rq_pages
+	 * if we return 0 or an error.
+	 */
+	rqstp->rq_respages = rqstp->rq_pages;
+	rqstp->rq_next_page = rqstp->rq_respages;
+
 	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
 	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
 	if (ret < 0)
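The effect of the two new assignments above can be checked with plain pointer arithmetic. The following stand-alone snippet is an illustration only, not kernel code (struct page here is a local stand-in); it prints how many pages the release loop would put before and after the patch:

#include <stdio.h>

int main(void)
{
	struct page { int dummy; } pages[2];
	struct page *rq_pages[2] = { &pages[0], &pages[1] };
	struct page **rq_respages, **rq_next_page;

	/* Old behaviour, set in svc_rdma_build_arg_xdr(): one-page gap. */
	rq_respages = &rq_pages[0];
	rq_next_page = rq_respages + 1;
	printf("released before the patch: %td page(s)\n",
	       rq_next_page - rq_respages);	/* 1 */

	/* New behaviour, set in svc_rdma_recvfrom(): empty range. */
	rq_respages = rq_pages;
	rq_next_page = rq_respages;
	printf("released after the patch:  %td page(s)\n",
	       rq_next_page - rq_respages);	/* 0 */
	return 0;
}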
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 4a3efaea277c..b958fb65f4e3 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -657,7 +657,9 @@ static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
 		ctxt->sc_pages[i] = rqstp->rq_respages[i];
 		rqstp->rq_respages[i] = NULL;
 	}
-	rqstp->rq_next_page = rqstp->rq_respages + 1;
+
+	/* Prevent svc_xprt_release from releasing pages in rq_pages */
+	rqstp->rq_next_page = rqstp->rq_respages;
 }
 
 /* Prepare the portion of the RPC Reply that will be transmitted