author     Chuck Lever <chuck.lever@oracle.com>        2017-08-03 14:30:03 -0400
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>  2017-08-08 10:52:00 -0400
commit     96f8778f70d0f5b988146d757a26dcd5d5b44116 (patch)
tree       77c2d789cacf667f54cd966b7727bb13cf1e7272 /net/sunrpc
parent     d31ae2548142b7cd12404929ef3a13ae27c9d961 (diff)
download   linux-stable-96f8778f70d0f5b988146d757a26dcd5d5b44116.tar.gz
           linux-stable-96f8778f70d0f5b988146d757a26dcd5d5b44116.tar.bz2
           linux-stable-96f8778f70d0f5b988146d757a26dcd5d5b44116.zip
xprtrdma: Add xdr_init_decode to rpcrdma_reply_handler()
Transport header decoding deals with untrusted input data; therefore decoding this header needs to be hardened.

Adopt the same infrastructure that is used when XDR decoding NFS replies. This is slightly more CPU-intensive than the replaced code, but we're not adding new atomics, locking, or context switches. The cost is manageable.

Start by initializing an xdr_stream in rpcrdma_reply_handler().

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
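For readers unfamiliar with the xdr_stream infrastructure being adopted, the sketch below shows the bounded fixed-field decode pattern in isolation. It is illustrative only: xdr_init_decode() (the three-argument form used in this patch) and xdr_inline_decode() are real kernel APIs, but the struct rpcrdma_fixed_hdr and decode_fixed_hdr() names are hypothetical and not part of this change.

	/* Minimal sketch of the hardened decode pattern this patch adopts.
	 * Not part of the patch; names other than the xdr_* helpers are
	 * illustrative.
	 */
	#include <linux/sunrpc/xdr.h>

	struct rpcrdma_fixed_hdr {
		__be32 xid;
		__be32 vers;
		__be32 credits;
		__be32 proc;
	};

	static int decode_fixed_hdr(struct xdr_stream *xdr,
				    struct xdr_buf *hdrbuf,
				    struct rpcrdma_fixed_hdr *fh)
	{
		__be32 *p;

		/* Point the stream at the start of the received header */
		xdr_init_decode(xdr, hdrbuf, hdrbuf->head[0].iov_base);

		/* xdr_inline_decode() returns NULL rather than reading
		 * past the end of the receive buffer, so a short or
		 * mangled reply is rejected instead of being
		 * dereferenced blindly.
		 */
		p = xdr_inline_decode(xdr, 4 * sizeof(*p));
		if (unlikely(!p))
			return -EIO;

		fh->xid = *p++;
		fh->vers = *p++;
		fh->credits = *p++;
		fh->proc = *p;
		return 0;
	}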
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c  | 37
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c     |  3
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h |  8
3 files changed, 34 insertions(+), 14 deletions(-)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index ca4d6e4528f3..24f58c7b3106 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -993,10 +993,11 @@ rpcrdma_reply_handler(struct work_struct *work)
struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+ struct xdr_stream *xdr = &rep->rr_stream;
struct rpcrdma_msg *headerp;
struct rpcrdma_req *req;
struct rpc_rqst *rqst;
- __be32 *iptr;
+ __be32 *iptr, *p, xid, vers, proc;
int rdmalen, status, rmerr;
unsigned long cwnd;
struct list_head mws;
@@ -1005,8 +1006,18 @@ rpcrdma_reply_handler(struct work_struct *work)
if (rep->rr_len == RPCRDMA_BAD_LEN)
goto out_badstatus;
- if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
- goto out_shortreply;
+
+ xdr_init_decode(xdr, &rep->rr_hdrbuf,
+ rep->rr_hdrbuf.head[0].iov_base);
+
+ /* Fixed transport header fields */
+ p = xdr_inline_decode(xdr, 4 * sizeof(*p));
+ if (unlikely(!p))
+ goto out_shortreply;
+ xid = *p++;
+ vers = *p++;
+ p++; /* credits */
+ proc = *p++;
headerp = rdmab_to_msg(rep->rr_rdmabuf);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -1018,8 +1029,7 @@ rpcrdma_reply_handler(struct work_struct *work)
* get context for handling any incoming chunks.
*/
spin_lock(&buf->rb_lock);
- req = rpcrdma_lookup_req_locked(&r_xprt->rx_buf,
- headerp->rm_xid);
+ req = rpcrdma_lookup_req_locked(&r_xprt->rx_buf, xid);
if (!req)
goto out_nomatch;
if (req->rl_reply)
@@ -1035,7 +1045,7 @@ rpcrdma_reply_handler(struct work_struct *work)
spin_unlock(&buf->rb_lock);
dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
- __func__, rep, req, be32_to_cpu(headerp->rm_xid));
+ __func__, rep, req, be32_to_cpu(xid));
/* Invalidate and unmap the data payloads before waking the
* waiting application. This guarantees the memory regions
@@ -1052,16 +1062,16 @@ rpcrdma_reply_handler(struct work_struct *work)
* the rep, rqst, and rq_task pointers remain stable.
*/
spin_lock_bh(&xprt->transport_lock);
- rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
+ rqst = xprt_lookup_rqst(xprt, xid);
if (!rqst)
goto out_norqst;
xprt->reestablish_timeout = 0;
- if (headerp->rm_vers != rpcrdma_version)
+ if (vers != rpcrdma_version)
goto out_badversion;
/* check for expected message types */
/* The order of some of these tests is important. */
- switch (headerp->rm_type) {
+ switch (proc) {
case rdma_msg:
/* never expect read chunks */
/* never expect reply chunks (two ways to check) */
@@ -1123,7 +1133,7 @@ badheader:
default:
dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
rqst->rq_task->tk_pid, __func__,
- be32_to_cpu(headerp->rm_type));
+ be32_to_cpu(proc));
status = -EIO;
r_xprt->rx_stats.bad_reply_count++;
break;
@@ -1161,7 +1171,7 @@ out_bcall:
*/
out_badversion:
dprintk("RPC: %s: invalid version %d\n",
- __func__, be32_to_cpu(headerp->rm_vers));
+ __func__, be32_to_cpu(vers));
status = -EIO;
r_xprt->rx_stats.bad_reply_count++;
goto out;
@@ -1204,16 +1214,15 @@ out_shortreply:
out_nomatch:
spin_unlock(&buf->rb_lock);
- dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
- __func__, be32_to_cpu(headerp->rm_xid),
- rep->rr_len);
+ dprintk("RPC: %s: no match for incoming xid 0x%08x\n",
+ __func__, be32_to_cpu(xid));
goto repost;
out_duplicate:
spin_unlock(&buf->rb_lock);
dprintk("RPC: %s: "
"duplicate reply %p to RPC request %p: xid 0x%08x\n",
- __func__, rep, req, be32_to_cpu(headerp->rm_xid));
+ __func__, rep, req, be32_to_cpu(xid));
/* If no pending RPC transaction was matched, post a replacement
* receive buffer before returning.
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index e4171f2abe37..f1b1c372f7fb 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -180,6 +180,7 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
__func__, rep, wc->byte_len);
rep->rr_len = wc->byte_len;
+ rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
rep->rr_wc_flags = wc->wc_flags;
rep->rr_inv_rkey = wc->ex.invalidate_rkey;
@@ -974,6 +975,8 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
rc = PTR_ERR(rep->rr_rdmabuf);
goto out_free;
}
+ xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
+ rdmab_length(rep->rr_rdmabuf));
rep->rr_cqe.done = rpcrdma_wc_receive;
rep->rr_rxprt = r_xprt;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index b282d3f8cdd8..13556ae11eec 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -223,6 +223,8 @@ struct rpcrdma_rep {
u32 rr_inv_rkey;
struct rpcrdma_xprt *rr_rxprt;
struct work_struct rr_work;
+ struct xdr_buf rr_hdrbuf;
+ struct xdr_stream rr_stream;
struct list_head rr_list;
struct ib_recv_wr rr_recv_wr;
struct rpcrdma_regbuf *rr_rdmabuf;
@@ -642,6 +644,12 @@ int rpcrdma_marshal_req(struct rpc_rqst *);
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
void rpcrdma_reply_handler(struct work_struct *work);
+static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
+{
+ xdr->head[0].iov_len = len;
+ xdr->len = len;
+}
+
/* RPC/RDMA module init - xprtrdma/transport.c
*/
extern unsigned int xprt_rdma_max_inline_read;