xprtrdma: Fix XDRBUF_SPARSE_PAGES support
author    Chuck Lever <chuck.lever@oracle.com>
          Tue, 8 Dec 2020 23:29:02 +0000 (18:29 -0500)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 30 Dec 2020 10:54:15 +0000 (11:54 +0100)
commit 15261b9126cd5bb2ad8521da49d8f5c042d904c7 upstream.

Olga K. observed that rpcrdma_marshal_req() allocates sparse pages
only when it has determined that a Reply chunk is necessary. There
are plenty of cases where no Reply chunk is needed, but the
XDRBUF_SPARSE_PAGES flag is set. The result would be a crash in
rpcrdma_inline_fixup() when it tries to copy parts of the received
Reply into a missing page.

To avoid crashing, handle sparse page allocation up front.
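
The core of the fix is easiest to see in a minimal userspace model
(a sketch only: fake_xdr_buf, alloc_sparse_pages, and the sizes here
are hypothetical stand-ins for struct xdr_buf and the patch's
rpcrdma_alloc_sparse_pages(); this is not kernel code). With
XDRBUF_SPARSE_PAGES, entries of the receive buffer's page array may
still be NULL when the buffer reaches the transport, so RDMA must
fill them all in before any Reply data lands:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-in for struct xdr_buf: with XDRBUF_SPARSE_PAGES,
 * slots in pages[] may still be NULL when the buffer reaches the
 * transport. */
struct fake_xdr_buf {
	void *pages[4];
	size_t page_len;	/* bytes expected to land in pages[] */
};

/* Same shape as the patch's rpcrdma_alloc_sparse_pages(): walk the
 * page array and fill every NULL slot up front. */
static int alloc_sparse_pages(struct fake_xdr_buf *buf)
{
	size_t i, npages = (buf->page_len + PAGE_SIZE - 1) / PAGE_SIZE;

	for (i = 0; i < npages; i++) {
		if (!buf->pages[i])
			buf->pages[i] = calloc(1, PAGE_SIZE);
		if (!buf->pages[i])
			return -1;	/* kernel version returns -ENOBUFS */
	}
	return 0;
}

int main(void)
{
	struct fake_xdr_buf buf = { .page_len = 2 * PAGE_SIZE };
	const char reply[] = "received Reply bytes";

	if (alloc_sparse_pages(&buf))
		return 1;
	/* Without the call above, pages[0] is NULL here and this copy
	 * -- the moral equivalent of rpcrdma_inline_fixup() filling in
	 * the received Reply -- dereferences a NULL page. */
	memcpy(buf.pages[0], reply, sizeof(reply));
	puts((char *)buf.pages[0]);
	return 0;
}

For TCP the equivalent allocation can happen during receive
processing, which is why the lazy scheme removed below was ever
viable.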

Until XATTR support was added, this issue did not appear often
because the only SPARSE_PAGES consumer always expected a reply
large enough to require a Reply chunk.

Reported-by: Olga Kornievskaia <kolga@netapp.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 0f5120c..c48536f 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -179,6 +179,31 @@ rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
                r_xprt->rx_ep->re_max_inline_recv;
 }
 
+/* ACL likes to be lazy in allocating pages. For TCP, these
+ * pages can be allocated during receive processing. Not true
+ * for RDMA, which must always provision receive buffers
+ * up front.
+ */
+static noinline int
+rpcrdma_alloc_sparse_pages(struct xdr_buf *buf)
+{
+       struct page **ppages;
+       int len;
+
+       len = buf->page_len;
+       ppages = buf->pages + (buf->page_base >> PAGE_SHIFT);
+       while (len > 0) {
+               if (!*ppages)
+                       *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
+               if (!*ppages)
+                       return -ENOBUFS;
+               ppages++;
+               len -= PAGE_SIZE;
+       }
+
+       return 0;
+}
+
 /* Split @vec on page boundaries into SGEs. FMR registers pages, not
  * a byte range. Other modes coalesce these SGEs into a single MR
  * when they can.
@@ -233,15 +258,6 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
        ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
        page_base = offset_in_page(xdrbuf->page_base);
        while (len) {
-               /* ACL likes to be lazy in allocating pages - ACLs
-                * are small by default but can get huge.
-                */
-               if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
-                       if (!*ppages)
-                               *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
-                       if (!*ppages)
-                               return -ENOBUFS;
-               }
                seg->mr_page = *ppages;
                seg->mr_offset = (char *)page_base;
                seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
@@ -867,6 +883,12 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
        __be32 *p;
        int ret;
 
+       if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) {
+               ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf);
+               if (ret)
+                       return ret;
+       }
+
        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
                        rqst);
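
A note on the design choices visible in the helper: GFP_NOWAIT keeps
the allocation from sleeping or entering direct reclaim, and
__GFP_NOWARN suppresses the allocation-failure warning, which suits a
path that can fail cleanly with -ENOBUFS before any RDMA Work Request
has been posted. The noinline annotation presumably keeps this
rarely-taken branch out of the hot rpcrdma_marshal_req() path. With
the allocation moved up front, rpcrdma_convert_iovs() can now assume
every page pointer it walks is non-NULL.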