author		Ondrej Mosnáček <omosnacek@gmail.com>	2017-04-02 21:19:14 +0200
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-04-05 21:58:37 +0800
commit		e55318c84f199d6056a0bcd98bc4612d01ccfe80 (patch)
tree		b77991aaa7d895773d631dbbdfc10ba960d96dfc /crypto
parent		acb9b159c784dc0033ede0dadde876ebd93aca4c (diff)
crypto: gf128mul - switch gf128mul_x_ble to le128
Currently, gf128mul_x_ble works with pointers to be128, even though it actually interprets the words as little-endian. Consequently, it uses cpu_to_le64/le64_to_cpu on fields of type __be64, which is incorrect.

This patch fixes that by changing the function to accept pointers to le128 and updating all users accordingly.

Signed-off-by: Ondrej Mosnacek <omosnacek@gmail.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
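For context, the following is a minimal, self-contained sketch (not the kernel's actual gf128mul_x_ble) of the operation this function performs under the "ble" convention: the 16-byte block is read as a little-endian 128-bit integer and multiplied by x in GF(2^128), reducing modulo x^128 + x^7 + x^2 + x + 1 (0x87 folded into the low byte), which is the tweak-update step used by XTS. The struct name and field layout below are illustrative assumptions standing in for the kernel's le128 type.

#include <stdint.h>

/* Stand-in for the kernel's le128: a 128-bit value stored little-endian. */
struct le128_sketch {
	uint64_t lo;	/* least-significant 64 bits, stored first in memory */
	uint64_t hi;	/* most-significant 64 bits */
};

/* Multiply by x in GF(2^128), little-endian block convention ("ble"). */
static void gf128mul_x_ble_sketch(struct le128_sketch *r,
				  const struct le128_sketch *x)
{
	uint64_t lo = x->lo;
	uint64_t hi = x->hi;
	/* If bit 127 is set, the left shift overflows the field and the
	 * polynomial is folded back in: x^128 = x^7 + x^2 + x + 1. */
	uint64_t carry = (hi >> 63) ? 0x87 : 0;

	r->hi = (hi << 1) | (lo >> 63);
	r->lo = (lo << 1) ^ carry;
}

In the kernel the two 64-bit halves are loaded and stored via le64_to_cpu()/cpu_to_le64() so the result is also correct on big-endian hosts; the point of this patch is that those conversions now operate on properly annotated __le64 fields rather than on __be64 ones.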
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/xts.c	| 38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/crypto/xts.c b/crypto/xts.c
index c976bfac29da..e197e64eb45c 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -39,11 +39,11 @@ struct xts_instance_ctx {
};
struct rctx {
- be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];
+ le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
- be128 t;
+ le128 t;
- be128 *ext;
+ le128 *ext;
struct scatterlist srcbuf[2];
struct scatterlist dstbuf[2];
@@ -99,7 +99,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
static int post_crypt(struct skcipher_request *req)
{
struct rctx *rctx = skcipher_request_ctx(req);
- be128 *buf = rctx->ext ?: rctx->buf;
+ le128 *buf = rctx->ext ?: rctx->buf;
struct skcipher_request *subreq;
const int bs = XTS_BLOCK_SIZE;
struct skcipher_walk w;
@@ -112,12 +112,12 @@ static int post_crypt(struct skcipher_request *req)
while (w.nbytes) {
unsigned int avail = w.nbytes;
- be128 *wdst;
+ le128 *wdst;
wdst = w.dst.virt.addr;
do {
- be128_xor(wdst, buf++, wdst);
+ le128_xor(wdst, buf++, wdst);
wdst++;
} while ((avail -= bs) >= bs);
@@ -150,7 +150,7 @@ out:
static int pre_crypt(struct skcipher_request *req)
{
struct rctx *rctx = skcipher_request_ctx(req);
- be128 *buf = rctx->ext ?: rctx->buf;
+ le128 *buf = rctx->ext ?: rctx->buf;
struct skcipher_request *subreq;
const int bs = XTS_BLOCK_SIZE;
struct skcipher_walk w;
@@ -174,15 +174,15 @@ static int pre_crypt(struct skcipher_request *req)
while (w.nbytes) {
unsigned int avail = w.nbytes;
- be128 *wsrc;
- be128 *wdst;
+ le128 *wsrc;
+ le128 *wdst;
wsrc = w.src.virt.addr;
wdst = w.dst.virt.addr;
do {
*buf++ = rctx->t;
- be128_xor(wdst++, &rctx->t, wsrc++);
+ le128_xor(wdst++, &rctx->t, wsrc++);
gf128mul_x_ble(&rctx->t, &rctx->t);
} while ((avail -= bs) >= bs);
@@ -353,8 +353,8 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
const unsigned int max_blks = req->tbuflen / bsize;
struct blkcipher_walk walk;
unsigned int nblocks;
- be128 *src, *dst, *t;
- be128 *t_buf = req->tbuf;
+ le128 *src, *dst, *t;
+ le128 *t_buf = req->tbuf;
int err, i;
BUG_ON(max_blks < 1);
@@ -367,8 +367,8 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
return err;
nblocks = min(nbytes / bsize, max_blks);
- src = (be128 *)walk.src.virt.addr;
- dst = (be128 *)walk.dst.virt.addr;
+ src = (le128 *)walk.src.virt.addr;
+ dst = (le128 *)walk.dst.virt.addr;
/* calculate first value of T */
req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
@@ -384,7 +384,7 @@ first:
t = &t_buf[i];
/* PP <- T xor P */
- be128_xor(dst + i, t, src + i);
+ le128_xor(dst + i, t, src + i);
}
/* CC <- E(Key2,PP) */
@@ -393,7 +393,7 @@ first:
/* C <- T xor CC */
for (i = 0; i < nblocks; i++)
- be128_xor(dst + i, dst + i, &t_buf[i]);
+ le128_xor(dst + i, dst + i, &t_buf[i]);
src += nblocks;
dst += nblocks;
@@ -401,7 +401,7 @@ first:
nblocks = min(nbytes / bsize, max_blks);
} while (nblocks > 0);
- *(be128 *)walk.iv = *t;
+ *(le128 *)walk.iv = *t;
err = blkcipher_walk_done(desc, &walk, nbytes);
nbytes = walk.nbytes;
@@ -409,8 +409,8 @@ first:
break;
nblocks = min(nbytes / bsize, max_blks);
- src = (be128 *)walk.src.virt.addr;
- dst = (be128 *)walk.dst.virt.addr;
+ src = (le128 *)walk.src.virt.addr;
+ dst = (le128 *)walk.dst.virt.addr;
}
return err;
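Note that the helpers swapped throughout the diff (be128_xor to le128_xor) both compute a plain 128-bit XOR, and XOR is byte-order agnostic, so the change affects only the type annotations, not the bytes produced. A sketch, reusing the illustrative le128_sketch struct assumed above:

/* XOR two 128-bit blocks; endianness is irrelevant to XOR. */
static void le128_xor_sketch(struct le128_sketch *r,
			     const struct le128_sketch *p,
			     const struct le128_sketch *q)
{
	r->lo = p->lo ^ q->lo;
	r->hi = p->hi ^ q->hi;
}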