author     Eric Biggers <ebiggers@google.com>        2017-03-23 13:39:46 -0700
committer  Herbert Xu <herbert@gondor.apana.org.au>  2017-03-24 21:51:34 +0800
commit     9df0eb180c2074451f25556eb566d89c7057c2ac (patch)
tree       493dfd3286e9d96b9c9ae42956136a4e9a7b5fb9
parent     efc989fce8703914bac091dcc4b8ff7a72ccf987 (diff)
crypto: xts,lrw - fix out-of-bounds write after kmalloc failure
In the generic XTS and LRW algorithms, for input data > 128 bytes, a
temporary buffer is allocated to hold the values to be XOR'ed with the
data before and after encryption or decryption.  If the allocation
fails, the fixed-size buffer embedded in the request buffer is meant to
be used as a fallback --- resulting in more calls to the ECB algorithm,
but still producing the correct result.  However, we weren't correctly
limiting subreq->cryptlen in this case, resulting in pre_crypt()
overrunning the embedded buffer.  Fix this by setting subreq->cryptlen
correctly.

Fixes: f1c131b45410 ("crypto: xts - Convert to skcipher")
Fixes: 700cb3f5fe75 ("crypto: lrw - Convert to skcipher")
Cc: stable@vger.kernel.org # v4.10+
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
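For readers unfamiliar with the pattern, the following is a minimal
standalone sketch in plain C (not kernel code; the names reqctx,
init_scratch, process_chunk, EMBEDDED_SIZE and MAX_SCRATCH are
hypothetical stand-ins for the LRW/XTS equivalents) of the
allocate-or-fall-back behaviour the fix enforces: the per-pass length is
only raised when the larger scratch buffer actually exists.

/*
 * Minimal sketch of the pattern, assuming a 128-byte embedded fallback
 * buffer and a page-sized upper bound on the optional scratch buffer.
 */
#include <stdlib.h>
#include <string.h>

#define EMBEDDED_SIZE 128   /* stands in for LRW_BUFFER_SIZE / XTS_BUFFER_SIZE */
#define MAX_SCRATCH   4096  /* stands in for PAGE_SIZE */

struct reqctx {
	unsigned char buf[EMBEDDED_SIZE]; /* embedded fallback buffer */
	unsigned char *ext;               /* optional larger scratch buffer */
	unsigned int chunk;               /* bytes handled per pass (plays the role of subreq->cryptlen) */
};

static void init_scratch(struct reqctx *rctx, unsigned int total_len)
{
	rctx->ext = NULL;
	rctx->chunk = EMBEDDED_SIZE;

	if (total_len > EMBEDDED_SIZE) {
		unsigned int n = total_len < MAX_SCRATCH ? total_len : MAX_SCRATCH;

		rctx->ext = malloc(n);
		if (rctx->ext)
			rctx->chunk = n; /* only grow the chunk if the big buffer exists */
	}
	/* freeing rctx->ext at the end of the request is omitted for brevity */
}

/* Later processing must write at most rctx->chunk bytes into the buffer. */
static void process_chunk(struct reqctx *rctx)
{
	unsigned char *dst = rctx->ext ? rctx->ext : rctx->buf;

	memset(dst, 0, rctx->chunk); /* stays in bounds for either buffer */
}

The bug fixed here is exactly the branch where the allocation fails: the
old code had already raised the per-pass length before calling kmalloc(),
so the later writes overran the embedded buffer.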
-rw-r--r--  crypto/lrw.c | 7 +++++--
-rw-r--r--  crypto/xts.c | 7 +++++--
2 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/crypto/lrw.c b/crypto/lrw.c
index ecd8474018e3..3ea095adafd9 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -286,8 +286,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
 
         subreq->cryptlen = LRW_BUFFER_SIZE;
         if (req->cryptlen > LRW_BUFFER_SIZE) {
-                subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
-                rctx->ext = kmalloc(subreq->cryptlen, gfp);
+                unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+
+                rctx->ext = kmalloc(n, gfp);
+                if (rctx->ext)
+                        subreq->cryptlen = n;
         }
 
         rctx->src = req->src;
diff --git a/crypto/xts.c b/crypto/xts.c
index baeb34dd8582..c976bfac29da 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -230,8 +230,11 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
 
         subreq->cryptlen = XTS_BUFFER_SIZE;
         if (req->cryptlen > XTS_BUFFER_SIZE) {
-                subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
-                rctx->ext = kmalloc(subreq->cryptlen, gfp);
+                unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
+
+                rctx->ext = kmalloc(n, gfp);
+                if (rctx->ext)
+                        subreq->cryptlen = n;
         }
 
         rctx->src = req->src;
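
As a hypothetical before/after comparison (plain C, not kernel code;
old_chunk() and new_chunk() are illustrative stand-ins, with "chunk"
playing the role of subreq->cryptlen), the difference shows up when the
large allocation fails for a request bigger than the embedded buffer:

#include <stdio.h>

#define EMBEDDED_SIZE 128
#define MAX_SCRATCH   4096

/* Old logic: the chunk is raised before the allocation result is known. */
static unsigned int old_chunk(unsigned int req_len, void *alloc_result)
{
	unsigned int chunk = EMBEDDED_SIZE;

	if (req_len > EMBEDDED_SIZE)
		chunk = req_len < MAX_SCRATCH ? req_len : MAX_SCRATCH;
	(void)alloc_result; /* allocation failure is never checked */
	return chunk;
}

/* New logic: the chunk is raised only if the big buffer actually exists. */
static unsigned int new_chunk(unsigned int req_len, void *alloc_result)
{
	unsigned int chunk = EMBEDDED_SIZE;

	if (req_len > EMBEDDED_SIZE && alloc_result)
		chunk = req_len < MAX_SCRATCH ? req_len : MAX_SCRATCH;
	return chunk;
}

int main(void)
{
	/* Simulate a 256-byte request whose allocation failed. */
	printf("old: %u bytes into a %u-byte fallback buffer\n",
	       old_chunk(256, NULL), EMBEDDED_SIZE); /* 256 > 128: out of bounds */
	printf("new: %u bytes into a %u-byte fallback buffer\n",
	       new_chunk(256, NULL), EMBEDDED_SIZE); /* clamped to 128 */
	return 0;
}

With a 256-byte request and a failed allocation, the old logic reports
256 bytes to be written into the 128-byte fallback buffer, while the new
logic clamps the length to 128, matching what the patch does in lrw.c
and xts.c.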