author		Chuck Lever <chuck.lever@oracle.com>	2021-05-26 15:35:20 -0400
committer	Trond Myklebust <trond.myklebust@hammerspace.com>	2021-05-27 08:46:19 -0400
commit		ae605ee9830840f14566a3b1cde27fa8096dbdd4 (patch)
tree		21c4a953e6e86a93f1b780f3b18c6451ad8e059b /net
parent		e67afa7ee4a59584d7253e45d7f63b9528819a13 (diff)
download	linux-ae605ee9830840f14566a3b1cde27fa8096dbdd4.tar.gz
linux-ae605ee9830840f14566a3b1cde27fa8096dbdd4.tar.bz2
linux-ae605ee9830840f14566a3b1cde27fa8096dbdd4.zip
xprtrdma: Revert 586a0787ce35
Commit 9ed5af268e88 ("SUNRPC: Clean up the handling of page padding
in rpc_prepare_reply_pages()") [Dec 2020] affects RPC Replies that
have a data payload (i.e., Write chunks).

rpcrdma_prepare_readch(), as its name suggests, sets up Read chunks
which are data payloads within RPC Calls. Those payloads are
constructed by xdr_write_pages(), which continues to stuff the call
buffer's tail kvec with the payload's XDR roundup. Thus removing the
tail buffer logic in rpcrdma_prepare_readch() was the wrong thing to
do.

Fixes: 586a0787ce35 ("xprtrdma: Clean up rpcrdma_prepare_readch()")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
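For illustration only (not part of the commit): a minimal userspace C
sketch of the XDR roundup the message above describes. XDR_UNIT and
xdr_roundup_pad() are local stand-ins for the kernel's helpers, and the
payload length is an arbitrary example value.

/* Sketch: how many pad bytes xdr_write_pages() would leave at the
 * start of the call buffer's tail kvec for an odd-length payload.
 */
#include <stdio.h>

#define XDR_UNIT 4u

static unsigned int xdr_roundup_pad(unsigned int len)
{
	return (XDR_UNIT - (len % XDR_UNIT)) % XDR_UNIT;
}

int main(void)
{
	unsigned int payload = 4099;	/* odd-length page-list payload */
	unsigned int pad = xdr_roundup_pad(payload);

	/* Per the commit message, xdr_write_pages() stuffs these pad
	 * bytes into the front of the tail kvec, so the send path must
	 * be prepared to find up to XDR_UNIT - 1 pad bytes there.
	 */
	printf("payload=%u bytes -> %u pad byte(s) at the start of the tail\n",
	       payload, pad);
	return 0;
}

Because the pad is at most XDR_UNIT - 1 bytes, a tail of 3 bytes or less
can only be padding, which is why the restored code in the diff below
tests xdr->tail[0].iov_len > 3.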
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/xprtrdma/rpc_rdma.c	27
1 file changed, 18 insertions, 9 deletions
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 649f7d8b9733..c335c1361564 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -628,8 +628,9 @@ out_mapping_err:
 	return false;
 }
 
-/* The tail iovec might not reside in the same page as the
- * head iovec.
+/* The tail iovec may include an XDR pad for the page list,
+ * as well as additional content, and may not reside in the
+ * same page as the head iovec.
  */
 static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
@@ -747,19 +748,27 @@ static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
				   struct rpcrdma_req *req,
				   struct xdr_buf *xdr)
 {
-	struct kvec *tail = &xdr->tail[0];
-
 	if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
 		return false;
-	/* If there is a Read chunk, the page list is handled
+	/* If there is a Read chunk, the page list is being handled
 	 * via explicit RDMA, and thus is skipped here.
 	 */
-	if (tail->iov_len) {
-		if (!rpcrdma_prepare_tail_iov(req, xdr,
-					      offset_in_page(tail->iov_base),
-					      tail->iov_len))
+	/* Do not include the tail if it is only an XDR pad */
+	if (xdr->tail[0].iov_len > 3) {
+		unsigned int page_base, len;
+
+		/* If the content in the page list is an odd length,
+		 * xdr_write_pages() adds a pad at the beginning of
+		 * the tail iovec. Force the tail's non-pad content to
+		 * land at the next XDR position in the Send message.
+		 */
+		page_base = offset_in_page(xdr->tail[0].iov_base);
+		len = xdr->tail[0].iov_len;
+		page_base += len & 3;
+		len -= len & 3;
+		if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
 			return false;
 		kref_get(&req->rl_kref);
 	}
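Again for illustration only (a standalone userspace sketch, not the
kernel function): a walk-through of the restored pad-skipping arithmetic
from the hunk above, using made-up numbers. tail_base and tail_len stand
in for offset_in_page(xdr->tail[0].iov_base) and xdr->tail[0].iov_len.

#include <stdio.h>

int main(void)
{
	unsigned int tail_base = 128;	/* pretend offset_in_page() result */
	unsigned int tail_len  = 11;	/* 3 pad bytes + 8 bytes of content */

	/* A tail of 3 bytes or less can only be XDR pad, so skip it. */
	if (tail_len > 3) {
		/* Trailing XDR content is a multiple of 4 bytes, so the
		 * low two bits of the tail length give the size of the
		 * pad placed at the front of the tail.
		 */
		tail_base += tail_len & 3;
		tail_len  -= tail_len & 3;
		printf("send tail at offset %u, length %u\n",
		       tail_base, tail_len);
	}
	return 0;
}

With a 3-byte pad and 8 bytes of trailing content, the sketch prints
"send tail at offset 131, length 8": only the non-pad bytes would be
posted, landing at the next XDR position in the Send message.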